// VmapGeneratedPlumbing.h — generated vmap plumbing for functorch.
2    #pragma once
3    #include <ATen/Operators.h>
4    #include <ATen/functorch/PlumbingHelper.h>
5    
6    namespace at { namespace functorch {
7    
8    template <typename batch_rule_t, batch_rule_t batch_rule>
9    at::Tensor _cast_Byte_generated_plumbing(const at::Tensor & self, bool non_blocking) {
10     c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
11     auto maybe_layer = maybeCurrentDynamicLayer();
12     vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
13     int64_t cur_level = maybe_layer->layerId();
14     if (!isBatchedAtLevel(self, cur_level)) {
15       return at::_ops::_cast_Byte::call(self, non_blocking);
16     }
17     auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
18     auto results = batch_rule(self_value, self_bdim, non_blocking);
19     return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
20   }
21   template <typename batch_rule_t, batch_rule_t batch_rule>
22   at::Tensor _cast_Char_generated_plumbing(const at::Tensor & self, bool non_blocking) {
23     c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
24     auto maybe_layer = maybeCurrentDynamicLayer();
25     vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
26     int64_t cur_level = maybe_layer->layerId();
27     if (!isBatchedAtLevel(self, cur_level)) {
28       return at::_ops::_cast_Char::call(self, non_blocking);
29     }
30     auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
31     auto results = batch_rule(self_value, self_bdim, non_blocking);
32     return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
33   }
34   template <typename batch_rule_t, batch_rule_t batch_rule>
35   at::Tensor _cast_Double_generated_plumbing(const at::Tensor & self, bool non_blocking) {
36     c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
37     auto maybe_layer = maybeCurrentDynamicLayer();
38     vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
39     int64_t cur_level = maybe_layer->layerId();
40     if (!isBatchedAtLevel(self, cur_level)) {
41       return at::_ops::_cast_Double::call(self, non_blocking);
42     }
43     auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
44     auto results = batch_rule(self_value, self_bdim, non_blocking);
45     return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
46   }
47   template <typename batch_rule_t, batch_rule_t batch_rule>
48   at::Tensor _cast_Float_generated_plumbing(const at::Tensor & self, bool non_blocking) {
49     c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
50     auto maybe_layer = maybeCurrentDynamicLayer();
51     vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
52     int64_t cur_level = maybe_layer->layerId();
53     if (!isBatchedAtLevel(self, cur_level)) {
54       return at::_ops::_cast_Float::call(self, non_blocking);
55     }
56     auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
57     auto results = batch_rule(self_value, self_bdim, non_blocking);
58     return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
59   }
60   template <typename batch_rule_t, batch_rule_t batch_rule>
61   at::Tensor _cast_Int_generated_plumbing(const at::Tensor & self, bool non_blocking) {
62     c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
63     auto maybe_layer = maybeCurrentDynamicLayer();
64     vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
65     int64_t cur_level = maybe_layer->layerId();
66     if (!isBatchedAtLevel(self, cur_level)) {
67       return at::_ops::_cast_Int::call(self, non_blocking);
68     }
69     auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
70     auto results = batch_rule(self_value, self_bdim, non_blocking);
71     return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
72   }
73   template <typename batch_rule_t, batch_rule_t batch_rule>
74   at::Tensor _cast_Long_generated_plumbing(const at::Tensor & self, bool non_blocking) {
75     c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
76     auto maybe_layer = maybeCurrentDynamicLayer();
77     vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
78     int64_t cur_level = maybe_layer->layerId();
79     if (!isBatchedAtLevel(self, cur_level)) {
80       return at::_ops::_cast_Long::call(self, non_blocking);
81     }
82     auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
83     auto results = batch_rule(self_value, self_bdim, non_blocking);
84     return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
85   }
86   template <typename batch_rule_t, batch_rule_t batch_rule>
87   at::Tensor _cast_Short_generated_plumbing(const at::Tensor & self, bool non_blocking) {
88     c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
89     auto maybe_layer = maybeCurrentDynamicLayer();
90     vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
91     int64_t cur_level = maybe_layer->layerId();
92     if (!isBatchedAtLevel(self, cur_level)) {
93       return at::_ops::_cast_Short::call(self, non_blocking);
94     }
95     auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
96     auto results = batch_rule(self_value, self_bdim, non_blocking);
97     return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
98   }
99   template <typename batch_rule_t, batch_rule_t batch_rule>
100  at::Tensor _cast_Half_generated_plumbing(const at::Tensor & self, bool non_blocking) {
101    c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
102    auto maybe_layer = maybeCurrentDynamicLayer();
103    vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
104    int64_t cur_level = maybe_layer->layerId();
105    if (!isBatchedAtLevel(self, cur_level)) {
106      return at::_ops::_cast_Half::call(self, non_blocking);
107    }
108    auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
109    auto results = batch_rule(self_value, self_bdim, non_blocking);
110    return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
111  }
// vmap plumbing for at::_ops::_backward (no return value).
// If none of self/inputs/gradient carry a batch dimension at the current
// vmap level, dispatch straight to the plain op; otherwise unwrap the
// batched tensor arguments and hand everything to the batch rule.
// `inputs` is forwarded to the batch rule as-is (no unwrapping here).
template <typename batch_rule_t, batch_rule_t batch_rule>
void _backward_generated_plumbing(const at::Tensor & self, at::TensorList inputs, const ::std::optional<at::Tensor> & gradient, ::std::optional<bool> retain_graph, bool create_graph) {
  // Exclude FuncTorchBatched for the duration of this call so the dispatch
  // below does not re-enter this plumbing.
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(inputs, cur_level) && !isBatchedAtLevel(gradient, cur_level)) {
    return at::_ops::_backward::call(self, inputs, gradient, retain_graph, create_graph);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  // `gradient` is optional: unwrap only when present; otherwise the batch
  // rule receives empty value/bdim optionals.
  std::optional<Tensor> gradient_value;
  std::optional<int64_t> gradient_bdim;
  if (gradient) {
      std::tie(gradient_value, gradient_bdim) = unwrapTensorAtLevel(gradient.value(), cur_level);
  }
  batch_rule(self_value, self_bdim, inputs, gradient_value, gradient_bdim, retain_graph, create_graph);
}
129  template <typename batch_rule_t, batch_rule_t batch_rule>
130  void set_data_generated_plumbing(at::Tensor & self, const at::Tensor & new_data) {
131    c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
132    auto maybe_layer = maybeCurrentDynamicLayer();
133    vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
134    int64_t cur_level = maybe_layer->layerId();
135    if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(new_data, cur_level)) {
136      return at::_ops::set_data::call(self, new_data);
137    }
138    auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
139    auto [new_data_value, new_data_bdim] = unwrapTensorAtLevel(new_data, cur_level);
140    batch_rule(self_value, self_bdim, new_data_value, new_data_bdim);
141  }
142  template <typename batch_rule_t, batch_rule_t batch_rule>
143  at::Tensor data_generated_plumbing(const at::Tensor & self) {
144    c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
145    auto maybe_layer = maybeCurrentDynamicLayer();
146    vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
147    int64_t cur_level = maybe_layer->layerId();
148    if (!isBatchedAtLevel(self, cur_level)) {
149      return at::_ops::data::call(self);
150    }
151    auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
152    auto results = batch_rule(self_value, self_bdim);
153    return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
154  }
155  template <typename batch_rule_t, batch_rule_t batch_rule>
156  at::Tensor & requires_grad__generated_plumbing(at::Tensor & self, bool requires_grad) {
157    c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
158    auto maybe_layer = maybeCurrentDynamicLayer();
159    vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
160    int64_t cur_level = maybe_layer->layerId();
161    if (!isBatchedAtLevel(self, cur_level)) {
162      return at::_ops::requires_grad_::call(self, requires_grad);
163    }
164    auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
165    batch_rule(self_value, self_bdim, requires_grad);
166    return self;
167  }
168  template <typename batch_rule_t, batch_rule_t batch_rule>
169  void retain_grad_generated_plumbing(at::Tensor & self) {
170    c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
171    auto maybe_layer = maybeCurrentDynamicLayer();
172    vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
173    int64_t cur_level = maybe_layer->layerId();
174    if (!isBatchedAtLevel(self, cur_level)) {
175      return at::_ops::retain_grad::call(self);
176    }
177    auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
178    batch_rule(self_value, self_bdim);
179  }
180  template <typename batch_rule_t, batch_rule_t batch_rule>
181  at::Tensor _fw_primal_generated_plumbing(const at::Tensor & self, int64_t level) {
182    c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
183    auto maybe_layer = maybeCurrentDynamicLayer();
184    vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
185    int64_t cur_level = maybe_layer->layerId();
186    if (!isBatchedAtLevel(self, cur_level)) {
187      return at::_ops::_fw_primal::call(self, level);
188    }
189    auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
190    auto results = batch_rule(self_value, self_bdim, level);
191    return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
192  }
193  template <typename batch_rule_t, batch_rule_t batch_rule>
194  at::Tensor _make_dual_generated_plumbing(const at::Tensor & primal, const at::Tensor & tangent, int64_t level) {
195    c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
196    auto maybe_layer = maybeCurrentDynamicLayer();
197    vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
198    int64_t cur_level = maybe_layer->layerId();
199    if (!isBatchedAtLevel(primal, cur_level) && !isBatchedAtLevel(tangent, cur_level)) {
200      return at::_ops::_make_dual::call(primal, tangent, level);
201    }
202    auto [primal_value, primal_bdim] = unwrapTensorAtLevel(primal, cur_level);
203    auto [tangent_value, tangent_bdim] = unwrapTensorAtLevel(tangent, cur_level);
204    auto results = batch_rule(primal_value, primal_bdim, tangent_value, tangent_bdim, level);
205    return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
206  }
207  template <typename batch_rule_t, batch_rule_t batch_rule>
208  ::std::tuple<at::Tensor,at::Tensor> _unpack_dual_generated_plumbing(const at::Tensor & dual, int64_t level) {
209    c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
210    auto maybe_layer = maybeCurrentDynamicLayer();
211    vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
212    int64_t cur_level = maybe_layer->layerId();
213    if (!isBatchedAtLevel(dual, cur_level)) {
214      return at::_ops::_unpack_dual::call(dual, level);
215    }
216    auto [dual_value, dual_bdim] = unwrapTensorAtLevel(dual, cur_level);
217    auto results = batch_rule(dual_value, dual_bdim, level);
218    return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
219  }
220  template <typename batch_rule_t, batch_rule_t batch_rule>
221  at::Tensor _new_zeros_with_same_feature_meta_generated_plumbing(const at::Tensor & self, const at::Tensor & other, int64_t self_num_batch_dims) {
222    c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
223    auto maybe_layer = maybeCurrentDynamicLayer();
224    vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
225    int64_t cur_level = maybe_layer->layerId();
226    if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
227      return at::_ops::_new_zeros_with_same_feature_meta::call(self, other, self_num_batch_dims);
228    }
229    auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
230    auto [other_value, other_bdim] = unwrapTensorAtLevel(other, cur_level);
231    auto results = batch_rule(self_value, self_bdim, other_value, other_bdim, self_num_batch_dims);
232    return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
233  }
234  template <typename batch_rule_t, batch_rule_t batch_rule>
235  at::Tensor rename_generated_plumbing(const at::Tensor & self, ::std::optional<at::DimnameList> names) {
236    c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
237    auto maybe_layer = maybeCurrentDynamicLayer();
238    vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
239    int64_t cur_level = maybe_layer->layerId();
240    if (!isBatchedAtLevel(self, cur_level)) {
241      return at::_ops::rename::call(self, names);
242    }
243    auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
244    auto results = batch_rule(self_value, self_bdim, names);
245    return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
246  }
247  template <typename batch_rule_t, batch_rule_t batch_rule>
248  at::Tensor align_to_generated_plumbing(const at::Tensor & self, at::DimnameList names) {
249    c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
250    auto maybe_layer = maybeCurrentDynamicLayer();
251    vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
252    int64_t cur_level = maybe_layer->layerId();
253    if (!isBatchedAtLevel(self, cur_level)) {
254      return at::_ops::align_to::call(self, names);
255    }
256    auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
257    auto results = batch_rule(self_value, self_bdim, names);
258    return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
259  }
260  template <typename batch_rule_t, batch_rule_t batch_rule>
261  at::Tensor align_to_ellipsis_idx_generated_plumbing(const at::Tensor & self, at::DimnameList order, int64_t ellipsis_idx) {
262    c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
263    auto maybe_layer = maybeCurrentDynamicLayer();
264    vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
265    int64_t cur_level = maybe_layer->layerId();
266    if (!isBatchedAtLevel(self, cur_level)) {
267      return at::_ops::align_to_ellipsis_idx::call(self, order, ellipsis_idx);
268    }
269    auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
270    auto results = batch_rule(self_value, self_bdim, order, ellipsis_idx);
271    return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
272  }
273  template <typename batch_rule_t, batch_rule_t batch_rule>
274  at::Tensor align_as_generated_plumbing(const at::Tensor & self, const at::Tensor & other) {
275    c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
276    auto maybe_layer = maybeCurrentDynamicLayer();
277    vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
278    int64_t cur_level = maybe_layer->layerId();
279    if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
280      return at::_ops::align_as::call(self, other);
281    }
282    auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
283    auto [other_value, other_bdim] = unwrapTensorAtLevel(other, cur_level);
284    auto results = batch_rule(self_value, self_bdim, other_value, other_bdim);
285    return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
286  }
287  template <typename batch_rule_t, batch_rule_t batch_rule>
288  ::std::vector<at::Tensor> align_tensors_generated_plumbing(at::TensorList tensors) {
289    c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
290    auto maybe_layer = maybeCurrentDynamicLayer();
291    vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
292    int64_t cur_level = maybe_layer->layerId();
293    if (!isBatchedAtLevel(tensors, cur_level)) {
294      return at::_ops::align_tensors::call(tensors);
295    }
296  
297    auto results = batch_rule(tensors);
298    return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
299  }
300  template <typename batch_rule_t, batch_rule_t batch_rule>
301  void _assert_async_generated_plumbing(const at::Tensor & self) {
302    c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
303    auto maybe_layer = maybeCurrentDynamicLayer();
304    vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
305    int64_t cur_level = maybe_layer->layerId();
306    if (!isBatchedAtLevel(self, cur_level)) {
307      return at::_ops::_assert_async::call(self);
308    }
309    auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
310    batch_rule(self_value, self_bdim);
311  }
312  template <typename batch_rule_t, batch_rule_t batch_rule>
313  void _assert_async_msg_generated_plumbing(const at::Tensor & self, c10::string_view assert_msg) {
314    c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
315    auto maybe_layer = maybeCurrentDynamicLayer();
316    vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
317    int64_t cur_level = maybe_layer->layerId();
318    if (!isBatchedAtLevel(self, cur_level)) {
319      return at::_ops::_assert_async_msg::call(self, assert_msg);
320    }
321    auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
322    batch_rule(self_value, self_bdim, assert_msg);
323  }
324  template <typename batch_rule_t, batch_rule_t batch_rule>
325  at::Tensor _functional_assert_scalar_generated_plumbing(const at::Scalar & self, c10::string_view assert_msg, const at::Tensor & dep_token) {
326    c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
327    auto maybe_layer = maybeCurrentDynamicLayer();
328    vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
329    int64_t cur_level = maybe_layer->layerId();
330    if (!isBatchedAtLevel(dep_token, cur_level)) {
331      return at::_ops::_functional_assert_scalar::call(self, assert_msg, dep_token);
332    }
333    auto [dep_token_value, dep_token_bdim] = unwrapTensorAtLevel(dep_token, cur_level);
334    auto results = batch_rule(self, assert_msg, dep_token_value, dep_token_bdim);
335    return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
336  }
337  template <typename batch_rule_t, batch_rule_t batch_rule>
338  at::Tensor _functional_assert_async_msg_generated_plumbing(const at::Tensor & self, c10::string_view assert_msg, const at::Tensor & dep_token) {
339    c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
340    auto maybe_layer = maybeCurrentDynamicLayer();
341    vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
342    int64_t cur_level = maybe_layer->layerId();
343    if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(dep_token, cur_level)) {
344      return at::_ops::_functional_assert_async_msg::call(self, assert_msg, dep_token);
345    }
346    auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
347    auto [dep_token_value, dep_token_bdim] = unwrapTensorAtLevel(dep_token, cur_level);
348    auto results = batch_rule(self_value, self_bdim, assert_msg, dep_token_value, dep_token_bdim);
349    return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
350  }
351  template <typename batch_rule_t, batch_rule_t batch_rule>
352  void _assert_tensor_metadata_generated_plumbing(const at::Tensor & a, at::OptionalSymIntArrayRef size, at::OptionalSymIntArrayRef stride, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Device> device, ::std::optional<at::Layout> layout) {
353    c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
354    auto maybe_layer = maybeCurrentDynamicLayer();
355    vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
356    int64_t cur_level = maybe_layer->layerId();
357    if (!isBatchedAtLevel(a, cur_level)) {
358      return at::_ops::_assert_tensor_metadata::call(a, size, stride, dtype, device, layout);
359    }
360    auto [a_value, a_bdim] = unwrapTensorAtLevel(a, cur_level);
361    batch_rule(a_value, a_bdim, size, stride, dtype, device, layout);
362  }
363  template <typename batch_rule_t, batch_rule_t batch_rule>
364  at::Tensor _functional_sym_constrain_range_generated_plumbing(const at::Scalar & size, ::std::optional<int64_t> min, ::std::optional<int64_t> max, const at::Tensor & dep_token) {
365    c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
366    auto maybe_layer = maybeCurrentDynamicLayer();
367    vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
368    int64_t cur_level = maybe_layer->layerId();
369    if (!isBatchedAtLevel(dep_token, cur_level)) {
370      return at::_ops::_functional_sym_constrain_range::call(size, min, max, dep_token);
371    }
372    auto [dep_token_value, dep_token_bdim] = unwrapTensorAtLevel(dep_token, cur_level);
373    auto results = batch_rule(size, min, max, dep_token_value, dep_token_bdim);
374    return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
375  }
376  template <typename batch_rule_t, batch_rule_t batch_rule>
377  at::Tensor _functional_sym_constrain_range_for_size_generated_plumbing(const at::Scalar & size, ::std::optional<int64_t> min, ::std::optional<int64_t> max, const at::Tensor & dep_token) {
378    c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
379    auto maybe_layer = maybeCurrentDynamicLayer();
380    vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
381    int64_t cur_level = maybe_layer->layerId();
382    if (!isBatchedAtLevel(dep_token, cur_level)) {
383      return at::_ops::_functional_sym_constrain_range_for_size::call(size, min, max, dep_token);
384    }
385    auto [dep_token_value, dep_token_bdim] = unwrapTensorAtLevel(dep_token, cur_level);
386    auto results = batch_rule(size, min, max, dep_token_value, dep_token_bdim);
387    return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
388  }
389  template <typename batch_rule_t, batch_rule_t batch_rule>
390  at::Tensor refine_names_generated_plumbing(const at::Tensor & self, at::DimnameList names) {
391    c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
392    auto maybe_layer = maybeCurrentDynamicLayer();
393    vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
394    int64_t cur_level = maybe_layer->layerId();
395    if (!isBatchedAtLevel(self, cur_level)) {
396      return at::_ops::refine_names::call(self, names);
397    }
398    auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
399    auto results = batch_rule(self_value, self_bdim, names);
400    return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
401  }
402  template <typename batch_rule_t, batch_rule_t batch_rule>
403  ::std::tuple<at::Tensor,at::Tensor> _cudnn_ctc_loss_generated_plumbing(const at::Tensor & log_probs, const at::Tensor & targets, at::IntArrayRef input_lengths, at::IntArrayRef target_lengths, int64_t blank, bool deterministic, bool zero_infinity) {
404    c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
405    auto maybe_layer = maybeCurrentDynamicLayer();
406    vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
407    int64_t cur_level = maybe_layer->layerId();
408    if (!isBatchedAtLevel(log_probs, cur_level) && !isBatchedAtLevel(targets, cur_level)) {
409      return at::_ops::_cudnn_ctc_loss::call(log_probs, targets, input_lengths, target_lengths, blank, deterministic, zero_infinity);
410    }
411    auto [log_probs_value, log_probs_bdim] = unwrapTensorAtLevel(log_probs, cur_level);
412    auto [targets_value, targets_bdim] = unwrapTensorAtLevel(targets, cur_level);
413    auto results = batch_rule(log_probs_value, log_probs_bdim, targets_value, targets_bdim, input_lengths, target_lengths, blank, deterministic, zero_infinity);
414    return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
415  }
416  template <typename batch_rule_t, batch_rule_t batch_rule>
417  ::std::tuple<at::Tensor,at::Tensor> _cudnn_ctc_loss_Tensor_generated_plumbing(const at::Tensor & log_probs, const at::Tensor & targets, const at::Tensor & input_lengths, const at::Tensor & target_lengths, int64_t blank, bool deterministic, bool zero_infinity) {
418    c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
419    auto maybe_layer = maybeCurrentDynamicLayer();
420    vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
421    int64_t cur_level = maybe_layer->layerId();
422    if (!isBatchedAtLevel(log_probs, cur_level) && !isBatchedAtLevel(targets, cur_level) && !isBatchedAtLevel(input_lengths, cur_level) && !isBatchedAtLevel(target_lengths, cur_level)) {
423      return at::_ops::_cudnn_ctc_loss_Tensor::call(log_probs, targets, input_lengths, target_lengths, blank, deterministic, zero_infinity);
424    }
425    auto [log_probs_value, log_probs_bdim] = unwrapTensorAtLevel(log_probs, cur_level);
426    auto [targets_value, targets_bdim] = unwrapTensorAtLevel(targets, cur_level);
427    auto [input_lengths_value, input_lengths_bdim] = unwrapTensorAtLevel(input_lengths, cur_level);
428    auto [target_lengths_value, target_lengths_bdim] = unwrapTensorAtLevel(target_lengths, cur_level);
429    auto results = batch_rule(log_probs_value, log_probs_bdim, targets_value, targets_bdim, input_lengths_value, input_lengths_bdim, target_lengths_value, target_lengths_bdim, blank, deterministic, zero_infinity);
430    return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
431  }
432  template <typename batch_rule_t, batch_rule_t batch_rule>
433  at::Tensor _cudnn_rnn_flatten_weight_generated_plumbing(at::TensorList weight_arr, int64_t weight_stride0, c10::SymInt input_size, int64_t mode, c10::SymInt hidden_size, c10::SymInt proj_size, int64_t num_layers, bool batch_first, bool bidirectional) {
434    c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
435    auto maybe_layer = maybeCurrentDynamicLayer();
436    vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
437    int64_t cur_level = maybe_layer->layerId();
438    if (!isBatchedAtLevel(weight_arr, cur_level)) {
439      return at::_ops::_cudnn_rnn_flatten_weight::call(weight_arr, weight_stride0, input_size, mode, hidden_size, proj_size, num_layers, batch_first, bidirectional);
440    }
441  
442    auto results = batch_rule(weight_arr, weight_stride0, input_size, mode, hidden_size, proj_size, num_layers, batch_first, bidirectional);
443    return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
444  }
// vmap plumbing for at::_ops::_cudnn_rnn. Falls through to the plain op when
// none of the tensor arguments (input, weight list, optional weight_buf, hx,
// optional cx, optional dropout_state) are batched at the current vmap level.
// Otherwise the required tensors are unwrapped, each optional tensor is
// unwrapped only when present, and all five results from the batch rule are
// re-wrapped as batched tensors. The `weight` TensorList is forwarded to the
// batch rule as-is (not unwrapped here).
template <typename batch_rule_t, batch_rule_t batch_rule>
::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor,at::Tensor> _cudnn_rnn_generated_plumbing(const at::Tensor & input, at::TensorList weight, int64_t weight_stride0, const ::std::optional<at::Tensor> & weight_buf, const at::Tensor & hx, const ::std::optional<at::Tensor> & cx, int64_t mode, c10::SymInt hidden_size, c10::SymInt proj_size, int64_t num_layers, bool batch_first, double dropout, bool train, bool bidirectional, c10::SymIntArrayRef batch_sizes, const ::std::optional<at::Tensor> & dropout_state) {
  // Exclude FuncTorchBatched while this plumbing runs so the dispatch below
  // does not re-enter it.
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(weight_buf, cur_level) && !isBatchedAtLevel(hx, cur_level) && !isBatchedAtLevel(cx, cur_level) && !isBatchedAtLevel(dropout_state, cur_level)) {
    return at::_ops::_cudnn_rnn::call(input, weight, weight_stride0, weight_buf, hx, cx, mode, hidden_size, proj_size, num_layers, batch_first, dropout, train, bidirectional, batch_sizes, dropout_state);
  }
  auto [input_value, input_bdim] = unwrapTensorAtLevel(input, cur_level);
  auto [hx_value, hx_bdim] = unwrapTensorAtLevel(hx, cur_level);
  // Optional tensors: unwrap only when engaged; otherwise the batch rule
  // receives empty value/bdim optionals.
  std::optional<Tensor> weight_buf_value;
  std::optional<int64_t> weight_buf_bdim;
  if (weight_buf) {
      std::tie(weight_buf_value, weight_buf_bdim) = unwrapTensorAtLevel(weight_buf.value(), cur_level);
  }
  std::optional<Tensor> cx_value;
  std::optional<int64_t> cx_bdim;
  if (cx) {
      std::tie(cx_value, cx_bdim) = unwrapTensorAtLevel(cx.value(), cur_level);
  }
  std::optional<Tensor> dropout_state_value;
  std::optional<int64_t> dropout_state_bdim;
  if (dropout_state) {
      std::tie(dropout_state_value, dropout_state_bdim) = unwrapTensorAtLevel(dropout_state.value(), cur_level);
  }
  auto results = batch_rule(input_value, input_bdim, weight, weight_stride0, weight_buf_value, weight_buf_bdim, hx_value, hx_bdim, cx_value, cx_bdim, mode, hidden_size, proj_size, num_layers, batch_first, dropout, train, bidirectional, batch_sizes, dropout_state_value, dropout_state_bdim);
  // Five outputs, each returned by the batch rule as a (value, bdim) pair.
  return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level), makeBatched(std::get<6>(results), std::get<7>(results), cur_level), makeBatched(std::get<8>(results), std::get<9>(results), cur_level));
}
template <typename batch_rule_t, batch_rule_t batch_rule>
// vmap plumbing for _cudnn_rnn_backward: unwraps each batched tensor argument
// (including the optional ones) at the current dynamic layer, forwards
// value/batch-dim pairs to the batch rule, and re-wraps the four results
// (three Tensors plus a vector of Tensors) as batched tensors.
::std::tuple<at::Tensor,at::Tensor,at::Tensor,::std::vector<at::Tensor>> _cudnn_rnn_backward_generated_plumbing(const at::Tensor & input, at::TensorList weight, int64_t weight_stride0, const at::Tensor & weight_buf, const at::Tensor & hx, const ::std::optional<at::Tensor> & cx, const at::Tensor & output, const ::std::optional<at::Tensor> & grad_output, const ::std::optional<at::Tensor> & grad_hy, const ::std::optional<at::Tensor> & grad_cy, int64_t mode, c10::SymInt hidden_size, c10::SymInt proj_size, int64_t num_layers, bool batch_first, double dropout, bool train, bool bidirectional, c10::SymIntArrayRef batch_sizes, const ::std::optional<at::Tensor> & dropout_state, const at::Tensor & reserve, ::std::array<bool,4> output_mask) {
  // Keep the batched dispatch key excluded so the calls below do not
  // re-enter this plumbing kernel.
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  // Fast path: nothing is batched at this level, call the op directly.
  if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(weight_buf, cur_level) && !isBatchedAtLevel(hx, cur_level) && !isBatchedAtLevel(cx, cur_level) && !isBatchedAtLevel(output, cur_level) && !isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(grad_hy, cur_level) && !isBatchedAtLevel(grad_cy, cur_level) && !isBatchedAtLevel(dropout_state, cur_level) && !isBatchedAtLevel(reserve, cur_level)) {
    return at::_ops::_cudnn_rnn_backward::call(input, weight, weight_stride0, weight_buf, hx, cx, output, grad_output, grad_hy, grad_cy, mode, hidden_size, proj_size, num_layers, batch_first, dropout, train, bidirectional, batch_sizes, dropout_state, reserve, output_mask);
  }
  // Mandatory tensor arguments: unwrap into (value, batch-dim) pairs.
  auto [input_value, input_bdim] = unwrapTensorAtLevel(input, cur_level);
  auto [weight_buf_value, weight_buf_bdim] = unwrapTensorAtLevel(weight_buf, cur_level);
  auto [hx_value, hx_bdim] = unwrapTensorAtLevel(hx, cur_level);
  auto [output_value, output_bdim] = unwrapTensorAtLevel(output, cur_level);
  auto [reserve_value, reserve_bdim] = unwrapTensorAtLevel(reserve, cur_level);
  // Optional tensor arguments: only unwrap when present; otherwise the
  // (value, bdim) pair stays nullopt.
  std::optional<Tensor> cx_value;
  std::optional<int64_t> cx_bdim;
  if (cx) {
      std::tie(cx_value, cx_bdim) = unwrapTensorAtLevel(cx.value(), cur_level);
  }
  std::optional<Tensor> grad_output_value;
  std::optional<int64_t> grad_output_bdim;
  if (grad_output) {
      std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output.value(), cur_level);
  }
  std::optional<Tensor> grad_hy_value;
  std::optional<int64_t> grad_hy_bdim;
  if (grad_hy) {
      std::tie(grad_hy_value, grad_hy_bdim) = unwrapTensorAtLevel(grad_hy.value(), cur_level);
  }
  std::optional<Tensor> grad_cy_value;
  std::optional<int64_t> grad_cy_bdim;
  if (grad_cy) {
      std::tie(grad_cy_value, grad_cy_bdim) = unwrapTensorAtLevel(grad_cy.value(), cur_level);
  }
  std::optional<Tensor> dropout_state_value;
  std::optional<int64_t> dropout_state_bdim;
  if (dropout_state) {
      std::tie(dropout_state_value, dropout_state_bdim) = unwrapTensorAtLevel(dropout_state.value(), cur_level);
  }
  // Argument order mirrors the op schema, with every tensor replaced by its
  // (value, bdim) pair; non-tensor arguments are passed through unchanged.
  auto results = batch_rule(input_value, input_bdim, weight, weight_stride0, weight_buf_value, weight_buf_bdim, hx_value, hx_bdim, cx_value, cx_bdim, output_value, output_bdim, grad_output_value, grad_output_bdim, grad_hy_value, grad_hy_bdim, grad_cy_value, grad_cy_bdim, mode, hidden_size, proj_size, num_layers, batch_first, dropout, train, bidirectional, batch_sizes, dropout_state_value, dropout_state_bdim, reserve_value, reserve_bdim, output_mask);
  // Re-wrap: tuple slots alternate (tensor, bdim); the last pair is a vector
  // of tensors and uses makeBatchedVector.
  return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level), makeBatchedVector(std::get<6>(results), std::get<7>(results), cur_level));
}
516  template <typename batch_rule_t, batch_rule_t batch_rule>
517  ::std::tuple<at::Tensor,at::Tensor> _fused_dropout_generated_plumbing(const at::Tensor & self, double p, ::std::optional<at::Generator> generator) {
518    c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
519    auto maybe_layer = maybeCurrentDynamicLayer();
520    vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
521    int64_t cur_level = maybe_layer->layerId();
522    if (!isBatchedAtLevel(self, cur_level)) {
523      return at::_ops::_fused_dropout::call(self, p, generator);
524    }
525    auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
526    auto results = batch_rule(self_value, self_bdim, p, generator);
527    return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
528  }
529  template <typename batch_rule_t, batch_rule_t batch_rule>
530  at::Tensor _masked_scale_generated_plumbing(const at::Tensor & self, const at::Tensor & mask, double scale) {
531    c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
532    auto maybe_layer = maybeCurrentDynamicLayer();
533    vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
534    int64_t cur_level = maybe_layer->layerId();
535    if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(mask, cur_level)) {
536      return at::_ops::_masked_scale::call(self, mask, scale);
537    }
538    auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
539    auto [mask_value, mask_bdim] = unwrapTensorAtLevel(mask, cur_level);
540    auto results = batch_rule(self_value, self_bdim, mask_value, mask_bdim, scale);
541    return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
542  }
543  template <typename batch_rule_t, batch_rule_t batch_rule>
544  ::std::tuple<at::Tensor,at::Tensor> native_dropout_generated_plumbing(const at::Tensor & input, double p, ::std::optional<bool> train) {
545    c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
546    auto maybe_layer = maybeCurrentDynamicLayer();
547    vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
548    int64_t cur_level = maybe_layer->layerId();
549    if (!isBatchedAtLevel(input, cur_level)) {
550      return at::_ops::native_dropout::call(input, p, train);
551    }
552    auto [input_value, input_bdim] = unwrapTensorAtLevel(input, cur_level);
553    auto results = batch_rule(input_value, input_bdim, p, train);
554    return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
555  }
556  template <typename batch_rule_t, batch_rule_t batch_rule>
557  at::Tensor native_dropout_backward_generated_plumbing(const at::Tensor & grad_output, const at::Tensor & mask, double scale) {
558    c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
559    auto maybe_layer = maybeCurrentDynamicLayer();
560    vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
561    int64_t cur_level = maybe_layer->layerId();
562    if (!isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(mask, cur_level)) {
563      return at::_ops::native_dropout_backward::call(grad_output, mask, scale);
564    }
565    auto [grad_output_value, grad_output_bdim] = unwrapTensorAtLevel(grad_output, cur_level);
566    auto [mask_value, mask_bdim] = unwrapTensorAtLevel(mask, cur_level);
567    auto results = batch_rule(grad_output_value, grad_output_bdim, mask_value, mask_bdim, scale);
568    return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
569  }
570  template <typename batch_rule_t, batch_rule_t batch_rule>
571  ::std::tuple<at::Tensor,at::Tensor> _sobol_engine_draw_generated_plumbing(const at::Tensor & quasi, int64_t n, const at::Tensor & sobolstate, int64_t dimension, int64_t num_generated, ::std::optional<at::ScalarType> dtype) {
572    c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
573    auto maybe_layer = maybeCurrentDynamicLayer();
574    vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
575    int64_t cur_level = maybe_layer->layerId();
576    if (!isBatchedAtLevel(quasi, cur_level) && !isBatchedAtLevel(sobolstate, cur_level)) {
577      return at::_ops::_sobol_engine_draw::call(quasi, n, sobolstate, dimension, num_generated, dtype);
578    }
579    auto [quasi_value, quasi_bdim] = unwrapTensorAtLevel(quasi, cur_level);
580    auto [sobolstate_value, sobolstate_bdim] = unwrapTensorAtLevel(sobolstate, cur_level);
581    auto results = batch_rule(quasi_value, quasi_bdim, n, sobolstate_value, sobolstate_bdim, dimension, num_generated, dtype);
582    return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
583  }
584  template <typename batch_rule_t, batch_rule_t batch_rule>
585  at::Tensor & _sobol_engine_ff__generated_plumbing(at::Tensor & self, int64_t n, const at::Tensor & sobolstate, int64_t dimension, int64_t num_generated) {
586    c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
587    auto maybe_layer = maybeCurrentDynamicLayer();
588    vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
589    int64_t cur_level = maybe_layer->layerId();
590    if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(sobolstate, cur_level)) {
591      return at::_ops::_sobol_engine_ff_::call(self, n, sobolstate, dimension, num_generated);
592    }
593    auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
594    auto [sobolstate_value, sobolstate_bdim] = unwrapTensorAtLevel(sobolstate, cur_level);
595    batch_rule(self_value, self_bdim, n, sobolstate_value, sobolstate_bdim, dimension, num_generated);
596    return self;
597  }
598  template <typename batch_rule_t, batch_rule_t batch_rule>
599  at::Tensor & _sobol_engine_scramble__generated_plumbing(at::Tensor & self, const at::Tensor & ltm, int64_t dimension) {
600    c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
601    auto maybe_layer = maybeCurrentDynamicLayer();
602    vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
603    int64_t cur_level = maybe_layer->layerId();
604    if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(ltm, cur_level)) {
605      return at::_ops::_sobol_engine_scramble_::call(self, ltm, dimension);
606    }
607    auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
608    auto [ltm_value, ltm_bdim] = unwrapTensorAtLevel(ltm, cur_level);
609    batch_rule(self_value, self_bdim, ltm_value, ltm_bdim, dimension);
610    return self;
611  }
612  template <typename batch_rule_t, batch_rule_t batch_rule>
613  at::Tensor & _sobol_engine_initialize_state__generated_plumbing(at::Tensor & self, int64_t dimension) {
614    c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
615    auto maybe_layer = maybeCurrentDynamicLayer();
616    vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
617    int64_t cur_level = maybe_layer->layerId();
618    if (!isBatchedAtLevel(self, cur_level)) {
619      return at::_ops::_sobol_engine_initialize_state_::call(self, dimension);
620    }
621    auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
622    batch_rule(self_value, self_bdim, dimension);
623    return self;
624  }
625  template <typename batch_rule_t, batch_rule_t batch_rule>
626  at::Tensor _reshape_from_tensor_generated_plumbing(const at::Tensor & self, const at::Tensor & shape) {
627    c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
628    auto maybe_layer = maybeCurrentDynamicLayer();
629    vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
630    int64_t cur_level = maybe_layer->layerId();
631    if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(shape, cur_level)) {
632      return at::_ops::_reshape_from_tensor::call(self, shape);
633    }
634    auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
635    auto [shape_value, shape_bdim] = unwrapTensorAtLevel(shape, cur_level);
636    auto results = batch_rule(self_value, self_bdim, shape_value, shape_bdim);
637    return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
638  }
639  template <typename batch_rule_t, batch_rule_t batch_rule>
640  at::Tensor _shape_as_tensor_generated_plumbing(const at::Tensor & self) {
641    c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
642    auto maybe_layer = maybeCurrentDynamicLayer();
643    vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
644    int64_t cur_level = maybe_layer->layerId();
645    if (!isBatchedAtLevel(self, cur_level)) {
646      return at::_ops::_shape_as_tensor::call(self);
647    }
648    auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
649    auto results = batch_rule(self_value, self_bdim);
650    return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
651  }
652  template <typename batch_rule_t, batch_rule_t batch_rule>
653  at::Tensor dropout_generated_plumbing(const at::Tensor & input, double p, bool train) {
654    c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
655    auto maybe_layer = maybeCurrentDynamicLayer();
656    vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
657    int64_t cur_level = maybe_layer->layerId();
658    if (!isBatchedAtLevel(input, cur_level)) {
659      return at::_ops::dropout::call(input, p, train);
660    }
661    auto [input_value, input_bdim] = unwrapTensorAtLevel(input, cur_level);
662    auto results = batch_rule(input_value, input_bdim, p, train);
663    return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
664  }
665  template <typename batch_rule_t, batch_rule_t batch_rule>
666  at::Tensor & dropout__generated_plumbing(at::Tensor & self, double p, bool train) {
667    c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
668    auto maybe_layer = maybeCurrentDynamicLayer();
669    vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
670    int64_t cur_level = maybe_layer->layerId();
671    if (!isBatchedAtLevel(self, cur_level)) {
672      return at::_ops::dropout_::call(self, p, train);
673    }
674    auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
675    batch_rule(self_value, self_bdim, p, train);
676    return self;
677  }
678  template <typename batch_rule_t, batch_rule_t batch_rule>
679  at::Tensor feature_dropout_generated_plumbing(const at::Tensor & input, double p, bool train) {
680    c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
681    auto maybe_layer = maybeCurrentDynamicLayer();
682    vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
683    int64_t cur_level = maybe_layer->layerId();
684    if (!isBatchedAtLevel(input, cur_level)) {
685      return at::_ops::feature_dropout::call(input, p, train);
686    }
687    auto [input_value, input_bdim] = unwrapTensorAtLevel(input, cur_level);
688    auto results = batch_rule(input_value, input_bdim, p, train);
689    return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
690  }
691  template <typename batch_rule_t, batch_rule_t batch_rule>
692  at::Tensor & feature_dropout__generated_plumbing(at::Tensor & self, double p, bool train) {
693    c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
694    auto maybe_layer = maybeCurrentDynamicLayer();
695    vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
696    int64_t cur_level = maybe_layer->layerId();
697    if (!isBatchedAtLevel(self, cur_level)) {
698      return at::_ops::feature_dropout_::call(self, p, train);
699    }
700    auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
701    batch_rule(self_value, self_bdim, p, train);
702    return self;
703  }
704  template <typename batch_rule_t, batch_rule_t batch_rule>
705  at::Tensor alpha_dropout_generated_plumbing(const at::Tensor & input, double p, bool train) {
706    c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
707    auto maybe_layer = maybeCurrentDynamicLayer();
708    vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
709    int64_t cur_level = maybe_layer->layerId();
710    if (!isBatchedAtLevel(input, cur_level)) {
711      return at::_ops::alpha_dropout::call(input, p, train);
712    }
713    auto [input_value, input_bdim] = unwrapTensorAtLevel(input, cur_level);
714    auto results = batch_rule(input_value, input_bdim, p, train);
715    return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
716  }
717  template <typename batch_rule_t, batch_rule_t batch_rule>
718  at::Tensor & alpha_dropout__generated_plumbing(at::Tensor & self, double p, bool train) {
719    c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
720    auto maybe_layer = maybeCurrentDynamicLayer();
721    vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
722    int64_t cur_level = maybe_layer->layerId();
723    if (!isBatchedAtLevel(self, cur_level)) {
724      return at::_ops::alpha_dropout_::call(self, p, train);
725    }
726    auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
727    batch_rule(self_value, self_bdim, p, train);
728    return self;
729  }
730  template <typename batch_rule_t, batch_rule_t batch_rule>
731  at::Tensor feature_alpha_dropout_generated_plumbing(const at::Tensor & input, double p, bool train) {
732    c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
733    auto maybe_layer = maybeCurrentDynamicLayer();
734    vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
735    int64_t cur_level = maybe_layer->layerId();
736    if (!isBatchedAtLevel(input, cur_level)) {
737      return at::_ops::feature_alpha_dropout::call(input, p, train);
738    }
739    auto [input_value, input_bdim] = unwrapTensorAtLevel(input, cur_level);
740    auto results = batch_rule(input_value, input_bdim, p, train);
741    return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
742  }
743  template <typename batch_rule_t, batch_rule_t batch_rule>
744  at::Tensor & feature_alpha_dropout__generated_plumbing(at::Tensor & self, double p, bool train) {
745    c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
746    auto maybe_layer = maybeCurrentDynamicLayer();
747    vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
748    int64_t cur_level = maybe_layer->layerId();
749    if (!isBatchedAtLevel(self, cur_level)) {
750      return at::_ops::feature_alpha_dropout_::call(self, p, train);
751    }
752    auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
753    batch_rule(self_value, self_bdim, p, train);
754    return self;
755  }
756  template <typename batch_rule_t, batch_rule_t batch_rule>
757  at::Tensor abs_generated_plumbing(const at::Tensor & self) {
758    c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
759    auto maybe_layer = maybeCurrentDynamicLayer();
760    vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
761    int64_t cur_level = maybe_layer->layerId();
762    if (!isBatchedAtLevel(self, cur_level)) {
763      return at::_ops::abs::call(self);
764    }
765    auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
766    auto results = batch_rule(self_value, self_bdim);
767    return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
768  }
769  template <typename batch_rule_t, batch_rule_t batch_rule>
770  at::Tensor & abs__generated_plumbing(at::Tensor & self) {
771    c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
772    auto maybe_layer = maybeCurrentDynamicLayer();
773    vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
774    int64_t cur_level = maybe_layer->layerId();
775    if (!isBatchedAtLevel(self, cur_level)) {
776      return at::_ops::abs_::call(self);
777    }
778    auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
779    batch_rule(self_value, self_bdim);
780    return self;
781  }
782  template <typename batch_rule_t, batch_rule_t batch_rule>
783  at::Tensor absolute_generated_plumbing(const at::Tensor & self) {
784    c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
785    auto maybe_layer = maybeCurrentDynamicLayer();
786    vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
787    int64_t cur_level = maybe_layer->layerId();
788    if (!isBatchedAtLevel(self, cur_level)) {
789      return at::_ops::absolute::call(self);
790    }
791    auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
792    auto results = batch_rule(self_value, self_bdim);
793    return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
794  }
795  template <typename batch_rule_t, batch_rule_t batch_rule>
796  at::Tensor & absolute__generated_plumbing(at::Tensor & self) {
797    c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
798    auto maybe_layer = maybeCurrentDynamicLayer();
799    vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
800    int64_t cur_level = maybe_layer->layerId();
801    if (!isBatchedAtLevel(self, cur_level)) {
802      return at::_ops::absolute_::call(self);
803    }
804    auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
805    batch_rule(self_value, self_bdim);
806    return self;
807  }
808  template <typename batch_rule_t, batch_rule_t batch_rule>
809  at::Tensor angle_generated_plumbing(const at::Tensor & self) {
810    c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
811    auto maybe_layer = maybeCurrentDynamicLayer();
812    vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
813    int64_t cur_level = maybe_layer->layerId();
814    if (!isBatchedAtLevel(self, cur_level)) {
815      return at::_ops::angle::call(self);
816    }
817    auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
818    auto results = batch_rule(self_value, self_bdim);
819    return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
820  }
821  template <typename batch_rule_t, batch_rule_t batch_rule>
822  at::Tensor view_as_real_generated_plumbing(const at::Tensor & self) {
823    c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
824    auto maybe_layer = maybeCurrentDynamicLayer();
825    vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
826    int64_t cur_level = maybe_layer->layerId();
827    if (!isBatchedAtLevel(self, cur_level)) {
828      return at::_ops::view_as_real::call(self);
829    }
830    auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
831    auto results = batch_rule(self_value, self_bdim);
832    return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
833  }
834  template <typename batch_rule_t, batch_rule_t batch_rule>
835  at::Tensor view_as_complex_generated_plumbing(const at::Tensor & self) {
836    c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
837    auto maybe_layer = maybeCurrentDynamicLayer();
838    vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
839    int64_t cur_level = maybe_layer->layerId();
840    if (!isBatchedAtLevel(self, cur_level)) {
841      return at::_ops::view_as_complex::call(self);
842    }
843    auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
844    auto results = batch_rule(self_value, self_bdim);
845    return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
846  }
847  template <typename batch_rule_t, batch_rule_t batch_rule>
848  at::Tensor sgn_generated_plumbing(const at::Tensor & self) {
849    c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
850    auto maybe_layer = maybeCurrentDynamicLayer();
851    vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
852    int64_t cur_level = maybe_layer->layerId();
853    if (!isBatchedAtLevel(self, cur_level)) {
854      return at::_ops::sgn::call(self);
855    }
856    auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
857    auto results = batch_rule(self_value, self_bdim);
858    return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
859  }
860  template <typename batch_rule_t, batch_rule_t batch_rule>
861  at::Tensor & sgn__generated_plumbing(at::Tensor & self) {
862    c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
863    auto maybe_layer = maybeCurrentDynamicLayer();
864    vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
865    int64_t cur_level = maybe_layer->layerId();
866    if (!isBatchedAtLevel(self, cur_level)) {
867      return at::_ops::sgn_::call(self);
868    }
869    auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
870    batch_rule(self_value, self_bdim);
871    return self;
872  }
873  template <typename batch_rule_t, batch_rule_t batch_rule>
874  at::Tensor chalf_generated_plumbing(const at::Tensor & self, ::std::optional<at::MemoryFormat> memory_format) {
875    c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
876    auto maybe_layer = maybeCurrentDynamicLayer();
877    vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
878    int64_t cur_level = maybe_layer->layerId();
879    if (!isBatchedAtLevel(self, cur_level)) {
880      return at::_ops::chalf::call(self, memory_format);
881    }
882    auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
883    auto results = batch_rule(self_value, self_bdim, memory_format);
884    return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
885  }
886  template <typename batch_rule_t, batch_rule_t batch_rule>
887  at::Tensor real_generated_plumbing(const at::Tensor & self) {
888    c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
889    auto maybe_layer = maybeCurrentDynamicLayer();
890    vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
891    int64_t cur_level = maybe_layer->layerId();
892    if (!isBatchedAtLevel(self, cur_level)) {
893      return at::_ops::real::call(self);
894    }
895    auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
896    auto results = batch_rule(self_value, self_bdim);
897    return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
898  }
899  template <typename batch_rule_t, batch_rule_t batch_rule>
900  at::Tensor imag_generated_plumbing(const at::Tensor & self) {
901    c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
902    auto maybe_layer = maybeCurrentDynamicLayer();
903    vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
904    int64_t cur_level = maybe_layer->layerId();
905    if (!isBatchedAtLevel(self, cur_level)) {
906      return at::_ops::imag::call(self);
907    }
908    auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
909    auto results = batch_rule(self_value, self_bdim);
910    return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
911  }
912  template <typename batch_rule_t, batch_rule_t batch_rule>
913  at::Tensor _conj_generated_plumbing(const at::Tensor & self) {
914    c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
915    auto maybe_layer = maybeCurrentDynamicLayer();
916    vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
917    int64_t cur_level = maybe_layer->layerId();
918    if (!isBatchedAtLevel(self, cur_level)) {
919      return at::_ops::_conj::call(self);
920    }
921    auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
922    auto results = batch_rule(self_value, self_bdim);
923    return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
924  }
925  template <typename batch_rule_t, batch_rule_t batch_rule>
926  at::Tensor conj_generated_plumbing(const at::Tensor & self) {
927    c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
928    auto maybe_layer = maybeCurrentDynamicLayer();
929    vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
930    int64_t cur_level = maybe_layer->layerId();
931    if (!isBatchedAtLevel(self, cur_level)) {
932      return at::_ops::conj::call(self);
933    }
934    auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
935    auto results = batch_rule(self_value, self_bdim);
936    return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
937  }
938  template <typename batch_rule_t, batch_rule_t batch_rule>
939  at::Tensor _conj_physical_generated_plumbing(const at::Tensor & self) {
940    c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
941    auto maybe_layer = maybeCurrentDynamicLayer();
942    vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
943    int64_t cur_level = maybe_layer->layerId();
944    if (!isBatchedAtLevel(self, cur_level)) {
945      return at::_ops::_conj_physical::call(self);
946    }
947    auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
948    auto results = batch_rule(self_value, self_bdim);
949    return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
950  }
951  template <typename batch_rule_t, batch_rule_t batch_rule>
952  at::Tensor conj_physical_generated_plumbing(const at::Tensor & self) {
953    c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
954    auto maybe_layer = maybeCurrentDynamicLayer();
955    vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
956    int64_t cur_level = maybe_layer->layerId();
957    if (!isBatchedAtLevel(self, cur_level)) {
958      return at::_ops::conj_physical::call(self);
959    }
960    auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
961    auto results = batch_rule(self_value, self_bdim);
962    return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
963  }
964  template <typename batch_rule_t, batch_rule_t batch_rule>
965  at::Tensor & conj_physical__generated_plumbing(at::Tensor & self) {
966    c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
967    auto maybe_layer = maybeCurrentDynamicLayer();
968    vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
969    int64_t cur_level = maybe_layer->layerId();
970    if (!isBatchedAtLevel(self, cur_level)) {
971      return at::_ops::conj_physical_::call(self);
972    }
973    auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
974    batch_rule(self_value, self_bdim);
975    return self;
976  }
977  template <typename batch_rule_t, batch_rule_t batch_rule>
978  at::Tensor resolve_conj_generated_plumbing(const at::Tensor & self) {
979    c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
980    auto maybe_layer = maybeCurrentDynamicLayer();
981    vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
982    int64_t cur_level = maybe_layer->layerId();
983    if (!isBatchedAtLevel(self, cur_level)) {
984      return at::_ops::resolve_conj::call(self);
985    }
986    auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
987    auto results = batch_rule(self_value, self_bdim);
988    return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
989  }
990  template <typename batch_rule_t, batch_rule_t batch_rule>
991  at::Tensor resolve_neg_generated_plumbing(const at::Tensor & self) {
992    c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
993    auto maybe_layer = maybeCurrentDynamicLayer();
994    vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
995    int64_t cur_level = maybe_layer->layerId();
996    if (!isBatchedAtLevel(self, cur_level)) {
997      return at::_ops::resolve_neg::call(self);
998    }
999    auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
1000   auto results = batch_rule(self_value, self_bdim);
1001   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
1002 }
template <typename batch_rule_t, batch_rule_t batch_rule>
// vmap plumbing for aten::_neg_view: with FuncTorchBatched excluded from
// dispatch, unwrap `self` at the current vmap level, apply `batch_rule`, and
// re-wrap the (value, bdim) result; an unbatched input falls through to the op.
at::Tensor _neg_view_generated_plumbing(const at::Tensor & self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  // Fast path: nothing batched at this level, call the op directly.
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::_neg_view::call(self);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
// vmap plumbing for aten::acos: with FuncTorchBatched excluded from dispatch,
// unwrap `self` at the current vmap level, apply `batch_rule`, and re-wrap
// the (value, bdim) result; an unbatched input falls through to the op.
at::Tensor acos_generated_plumbing(const at::Tensor & self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  // Fast path: nothing batched at this level, call the op directly.
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::acos::call(self);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
// vmap plumbing for in-place aten::acos_: the batch rule mutates the
// unwrapped value, so its return is discarded and the original (still
// wrapped) `self` is returned.
at::Tensor & acos__generated_plumbing(at::Tensor & self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  // Fast path: nothing batched at this level, call the op directly.
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::acos_::call(self);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  batch_rule(self_value, self_bdim);
  return self;
}
template <typename batch_rule_t, batch_rule_t batch_rule>
// vmap plumbing for aten::arccos: with FuncTorchBatched excluded from
// dispatch, unwrap `self` at the current vmap level, apply `batch_rule`, and
// re-wrap the (value, bdim) result; an unbatched input falls through to the op.
at::Tensor arccos_generated_plumbing(const at::Tensor & self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  // Fast path: nothing batched at this level, call the op directly.
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::arccos::call(self);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
// vmap plumbing for in-place aten::arccos_: the batch rule mutates the
// unwrapped value, so its return is discarded and the original (still
// wrapped) `self` is returned.
at::Tensor & arccos__generated_plumbing(at::Tensor & self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  // Fast path: nothing batched at this level, call the op directly.
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::arccos_::call(self);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  batch_rule(self_value, self_bdim);
  return self;
}
template <typename batch_rule_t, batch_rule_t batch_rule>
// vmap plumbing for aten::avg_pool1d: unwrap the batched `self`, forward the
// non-tensor pooling parameters unchanged to `batch_rule`, and re-wrap the
// result; an unbatched input falls through to the op.
at::Tensor avg_pool1d_generated_plumbing(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, bool ceil_mode, bool count_include_pad) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  // Fast path: nothing batched at this level, call the op directly.
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::avg_pool1d::call(self, kernel_size, stride, padding, ceil_mode, count_include_pad);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, kernel_size, stride, padding, ceil_mode, count_include_pad);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
// vmap plumbing for aten::adaptive_avg_pool1d: unwrap the batched `self`,
// forward `output_size` unchanged to `batch_rule`, and re-wrap the result;
// an unbatched input falls through to the op.
at::Tensor adaptive_avg_pool1d_generated_plumbing(const at::Tensor & self, at::IntArrayRef output_size) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  // Fast path: nothing batched at this level, call the op directly.
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::adaptive_avg_pool1d::call(self, output_size);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, output_size);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
// vmap plumbing for aten::adaptive_max_pool1d: the batch rule returns two
// (value, bdim) pairs — output and indices — each re-wrapped as a batched
// tensor; an unbatched input falls through to the op.
::std::tuple<at::Tensor,at::Tensor> adaptive_max_pool1d_generated_plumbing(const at::Tensor & self, at::IntArrayRef output_size) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  // Fast path: nothing batched at this level, call the op directly.
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::adaptive_max_pool1d::call(self, output_size);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, output_size);
  return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
}
template <typename batch_rule_t, batch_rule_t batch_rule>
// vmap plumbing for aten::add.Tensor: unwrap both tensor operands at the
// current vmap level (a bdim may be absent for either), apply `batch_rule`,
// and re-wrap; if neither operand is batched, fall through to the op.
at::Tensor add_Tensor_generated_plumbing(const at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  // Fast path: no operand batched at this level, call the op directly.
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
    return at::_ops::add_Tensor::call(self, other, alpha);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto [other_value, other_bdim] = unwrapTensorAtLevel(other, cur_level);
  auto results = batch_rule(self_value, self_bdim, other_value, other_bdim, alpha);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
// vmap plumbing for in-place aten::add_.Tensor: both operands are unwrapped,
// the batch rule mutates `self_value` in place, and the original (still
// wrapped) `self` is returned.
at::Tensor & add__Tensor_generated_plumbing(at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  // Fast path: no operand batched at this level, call the op directly.
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
    return at::_ops::add__Tensor::call(self, other, alpha);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto [other_value, other_bdim] = unwrapTensorAtLevel(other, cur_level);
  batch_rule(self_value, self_bdim, other_value, other_bdim, alpha);
  return self;
}
template <typename batch_rule_t, batch_rule_t batch_rule>
// vmap plumbing for aten::_add_relu.Tensor: unwrap both tensor operands at
// the current vmap level, apply `batch_rule`, and re-wrap; if neither
// operand is batched, fall through to the op.
at::Tensor _add_relu_Tensor_generated_plumbing(const at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  // Fast path: no operand batched at this level, call the op directly.
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
    return at::_ops::_add_relu_Tensor::call(self, other, alpha);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto [other_value, other_bdim] = unwrapTensorAtLevel(other, cur_level);
  auto results = batch_rule(self_value, self_bdim, other_value, other_bdim, alpha);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
// vmap plumbing for in-place aten::_add_relu_.Tensor: both operands are
// unwrapped, the batch rule mutates `self_value` in place, and the original
// (still wrapped) `self` is returned.
at::Tensor & _add_relu__Tensor_generated_plumbing(at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  // Fast path: no operand batched at this level, call the op directly.
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
    return at::_ops::_add_relu__Tensor::call(self, other, alpha);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto [other_value, other_bdim] = unwrapTensorAtLevel(other, cur_level);
  batch_rule(self_value, self_bdim, other_value, other_bdim, alpha);
  return self;
}
template <typename batch_rule_t, batch_rule_t batch_rule>
// vmap plumbing for aten::_add_relu.Scalar: only `self` is a tensor; the
// scalar arguments are forwarded unchanged to `batch_rule` and the result is
// re-wrapped; an unbatched input falls through to the op.
at::Tensor _add_relu_Scalar_generated_plumbing(const at::Tensor & self, const at::Scalar & other, const at::Scalar & alpha) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  // Fast path: nothing batched at this level, call the op directly.
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::_add_relu_Scalar::call(self, other, alpha);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, other, alpha);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
// vmap plumbing for in-place aten::_add_relu_.Scalar: the batch rule mutates
// the unwrapped value, so its return is discarded and the original (still
// wrapped) `self` is returned.
at::Tensor & _add_relu__Scalar_generated_plumbing(at::Tensor & self, const at::Scalar & other, const at::Scalar & alpha) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  // Fast path: nothing batched at this level, call the op directly.
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::_add_relu__Scalar::call(self, other, alpha);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  batch_rule(self_value, self_bdim, other, alpha);
  return self;
}
template <typename batch_rule_t, batch_rule_t batch_rule>
// vmap plumbing for aten::add.Scalar: only `self` is a tensor; the scalar
// arguments are forwarded unchanged to `batch_rule` and the result is
// re-wrapped; an unbatched input falls through to the op.
at::Tensor add_Scalar_generated_plumbing(const at::Tensor & self, const at::Scalar & other, const at::Scalar & alpha) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  // Fast path: nothing batched at this level, call the op directly.
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::add_Scalar::call(self, other, alpha);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, other, alpha);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
// vmap plumbing for in-place aten::add_.Scalar: the batch rule mutates the
// unwrapped value, so its return is discarded and the original (still
// wrapped) `self` is returned.
at::Tensor & add__Scalar_generated_plumbing(at::Tensor & self, const at::Scalar & other, const at::Scalar & alpha) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  // Fast path: nothing batched at this level, call the op directly.
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::add__Scalar::call(self, other, alpha);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  batch_rule(self_value, self_bdim, other, alpha);
  return self;
}
template <typename batch_rule_t, batch_rule_t batch_rule>
// vmap plumbing for aten::addmv: unwrap all three tensor operands at the
// current vmap level, apply `batch_rule`, and re-wrap; if no operand is
// batched, fall through to the op.
at::Tensor addmv_generated_plumbing(const at::Tensor & self, const at::Tensor & mat, const at::Tensor & vec, const at::Scalar & beta, const at::Scalar & alpha) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  // Fast path: no operand batched at this level, call the op directly.
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(mat, cur_level) && !isBatchedAtLevel(vec, cur_level)) {
    return at::_ops::addmv::call(self, mat, vec, beta, alpha);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto [mat_value, mat_bdim] = unwrapTensorAtLevel(mat, cur_level);
  auto [vec_value, vec_bdim] = unwrapTensorAtLevel(vec, cur_level);
  auto results = batch_rule(self_value, self_bdim, mat_value, mat_bdim, vec_value, vec_bdim, beta, alpha);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
// vmap plumbing for in-place aten::addmv_: all three tensor operands are
// unwrapped, the batch rule mutates `self_value` in place, and the original
// (still wrapped) `self` is returned.
at::Tensor & addmv__generated_plumbing(at::Tensor & self, const at::Tensor & mat, const at::Tensor & vec, const at::Scalar & beta, const at::Scalar & alpha) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  // Fast path: no operand batched at this level, call the op directly.
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(mat, cur_level) && !isBatchedAtLevel(vec, cur_level)) {
    return at::_ops::addmv_::call(self, mat, vec, beta, alpha);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto [mat_value, mat_bdim] = unwrapTensorAtLevel(mat, cur_level);
  auto [vec_value, vec_bdim] = unwrapTensorAtLevel(vec, cur_level);
  batch_rule(self_value, self_bdim, mat_value, mat_bdim, vec_value, vec_bdim, beta, alpha);
  return self;
}
template <typename batch_rule_t, batch_rule_t batch_rule>
// vmap plumbing for aten::addr: unwrap all three tensor operands at the
// current vmap level, apply `batch_rule`, and re-wrap; if no operand is
// batched, fall through to the op.
at::Tensor addr_generated_plumbing(const at::Tensor & self, const at::Tensor & vec1, const at::Tensor & vec2, const at::Scalar & beta, const at::Scalar & alpha) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  // Fast path: no operand batched at this level, call the op directly.
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(vec1, cur_level) && !isBatchedAtLevel(vec2, cur_level)) {
    return at::_ops::addr::call(self, vec1, vec2, beta, alpha);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto [vec1_value, vec1_bdim] = unwrapTensorAtLevel(vec1, cur_level);
  auto [vec2_value, vec2_bdim] = unwrapTensorAtLevel(vec2, cur_level);
  auto results = batch_rule(self_value, self_bdim, vec1_value, vec1_bdim, vec2_value, vec2_bdim, beta, alpha);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
// vmap plumbing for in-place aten::addr_: all three tensor operands are
// unwrapped, the batch rule mutates `self_value` in place, and the original
// (still wrapped) `self` is returned.
at::Tensor & addr__generated_plumbing(at::Tensor & self, const at::Tensor & vec1, const at::Tensor & vec2, const at::Scalar & beta, const at::Scalar & alpha) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  // Fast path: no operand batched at this level, call the op directly.
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(vec1, cur_level) && !isBatchedAtLevel(vec2, cur_level)) {
    return at::_ops::addr_::call(self, vec1, vec2, beta, alpha);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto [vec1_value, vec1_bdim] = unwrapTensorAtLevel(vec1, cur_level);
  auto [vec2_value, vec2_bdim] = unwrapTensorAtLevel(vec2, cur_level);
  batch_rule(self_value, self_bdim, vec1_value, vec1_bdim, vec2_value, vec2_bdim, beta, alpha);
  return self;
}
template <typename batch_rule_t, batch_rule_t batch_rule>
// vmap plumbing for aten::affine_grid_generator: unwrap the batched `theta`,
// forward `size`/`align_corners` unchanged to `batch_rule`, and re-wrap the
// result; an unbatched input falls through to the op.
at::Tensor affine_grid_generator_generated_plumbing(const at::Tensor & theta, c10::SymIntArrayRef size, bool align_corners) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  // Fast path: nothing batched at this level, call the op directly.
  if (!isBatchedAtLevel(theta, cur_level)) {
    return at::_ops::affine_grid_generator::call(theta, size, align_corners);
  }
  auto [theta_value, theta_bdim] = unwrapTensorAtLevel(theta, cur_level);
  auto results = batch_rule(theta_value, theta_bdim, size, align_corners);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
// vmap plumbing for aten::affine_grid_generator_backward: unwrap the batched
// `grad`, forward `size`/`align_corners` unchanged to `batch_rule`, and
// re-wrap the result; an unbatched input falls through to the op.
at::Tensor affine_grid_generator_backward_generated_plumbing(const at::Tensor & grad, c10::SymIntArrayRef size, bool align_corners) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  // Fast path: nothing batched at this level, call the op directly.
  if (!isBatchedAtLevel(grad, cur_level)) {
    return at::_ops::affine_grid_generator_backward::call(grad, size, align_corners);
  }
  auto [grad_value, grad_bdim] = unwrapTensorAtLevel(grad, cur_level);
  auto results = batch_rule(grad_value, grad_bdim, size, align_corners);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
// vmap plumbing for aten::_is_all_true: with FuncTorchBatched excluded from
// dispatch, unwrap `self` at the current vmap level, apply `batch_rule`, and
// re-wrap the (value, bdim) result; an unbatched input falls through to the op.
at::Tensor _is_all_true_generated_plumbing(const at::Tensor & self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  // Fast path: nothing batched at this level, call the op directly.
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::_is_all_true::call(self);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
// vmap plumbing for aten::_is_any_true: with FuncTorchBatched excluded from
// dispatch, unwrap `self` at the current vmap level, apply `batch_rule`, and
// re-wrap the (value, bdim) result; an unbatched input falls through to the op.
at::Tensor _is_any_true_generated_plumbing(const at::Tensor & self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  // Fast path: nothing batched at this level, call the op directly.
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::_is_any_true::call(self);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
// vmap plumbing for aten::_test_check_tensor (testing op): unwrap `self` at
// the current vmap level, apply `batch_rule`, and re-wrap the result; an
// unbatched input falls through to the op.
at::Tensor _test_check_tensor_generated_plumbing(const at::Tensor & self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  // Fast path: nothing batched at this level, call the op directly.
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::_test_check_tensor::call(self);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
// vmap plumbing for aten::_test_functorch_fallback (testing op): unwrap both
// tensor operands at the current vmap level, apply `batch_rule`, and
// re-wrap; if neither operand is batched, fall through to the op.
at::Tensor _test_functorch_fallback_generated_plumbing(const at::Tensor & self, const at::Tensor & other) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  // Fast path: no operand batched at this level, call the op directly.
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
    return at::_ops::_test_functorch_fallback::call(self, other);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto [other_value, other_bdim] = unwrapTensorAtLevel(other, cur_level);
  auto results = batch_rule(self_value, self_bdim, other_value, other_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
// vmap plumbing for aten::all.dim: unwrap the batched `self`, forward
// `dim`/`keepdim` unchanged to `batch_rule`, and re-wrap the result; an
// unbatched input falls through to the op.
at::Tensor all_dim_generated_plumbing(const at::Tensor & self, int64_t dim, bool keepdim) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  // Fast path: nothing batched at this level, call the op directly.
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::all_dim::call(self, dim, keepdim);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, dim, keepdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
// vmap plumbing for aten::all.dims: unwrap the batched `self`, forward the
// optional `dim` list and `keepdim` unchanged to `batch_rule`, and re-wrap
// the result; an unbatched input falls through to the op.
at::Tensor all_dims_generated_plumbing(const at::Tensor & self, at::OptionalIntArrayRef dim, bool keepdim) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  // Fast path: nothing batched at this level, call the op directly.
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::all_dims::call(self, dim, keepdim);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, dim, keepdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
// vmap plumbing for aten::all.dimname: unwrap the batched `self`, forward
// the named `dim` and `keepdim` unchanged to `batch_rule`, and re-wrap the
// result; an unbatched input falls through to the op.
at::Tensor all_dimname_generated_plumbing(const at::Tensor & self, at::Dimname dim, bool keepdim) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  // Fast path: nothing batched at this level, call the op directly.
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::all_dimname::call(self, dim, keepdim);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, dim, keepdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
// vmap plumbing for aten::any.dim: unwrap the batched `self`, forward
// `dim`/`keepdim` unchanged to `batch_rule`, and re-wrap the result; an
// unbatched input falls through to the op.
at::Tensor any_dim_generated_plumbing(const at::Tensor & self, int64_t dim, bool keepdim) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  // Fast path: nothing batched at this level, call the op directly.
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::any_dim::call(self, dim, keepdim);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, dim, keepdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
// vmap plumbing for aten::any.dims: unwrap the batched `self`, forward the
// optional `dim` list and `keepdim` unchanged to `batch_rule`, and re-wrap
// the result; an unbatched input falls through to the op.
at::Tensor any_dims_generated_plumbing(const at::Tensor & self, at::OptionalIntArrayRef dim, bool keepdim) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  // Fast path: nothing batched at this level, call the op directly.
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::any_dims::call(self, dim, keepdim);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, dim, keepdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
// vmap plumbing for aten::any.dimname: unwrap the batched `self`, forward
// the named `dim` and `keepdim` unchanged to `batch_rule`, and re-wrap the
// result; an unbatched input falls through to the op.
at::Tensor any_dimname_generated_plumbing(const at::Tensor & self, at::Dimname dim, bool keepdim) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  // Fast path: nothing batched at this level, call the op directly.
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::any_dimname::call(self, dim, keepdim);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, dim, keepdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
// vmap plumbing for aten::_dim_arange: the tensor argument is `like` (not
// `self`); unwrap it at the current vmap level, apply `batch_rule`, and
// re-wrap the result; an unbatched input falls through to the op.
at::Tensor _dim_arange_generated_plumbing(const at::Tensor & like, int64_t dim) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  // Fast path: nothing batched at this level, call the op directly.
  if (!isBatchedAtLevel(like, cur_level)) {
    return at::_ops::_dim_arange::call(like, dim);
  }
  auto [like_value, like_bdim] = unwrapTensorAtLevel(like, cur_level);
  auto results = batch_rule(like_value, like_bdim, dim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
// vmap plumbing for aten::argmax: unwrap the batched `self`, forward the
// optional `dim` and `keepdim` unchanged to `batch_rule`, and re-wrap the
// result; an unbatched input falls through to the op.
at::Tensor argmax_generated_plumbing(const at::Tensor & self, ::std::optional<int64_t> dim, bool keepdim) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  // Fast path: nothing batched at this level, call the op directly.
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::argmax::call(self, dim, keepdim);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, dim, keepdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
// vmap plumbing for aten::argmin: unwrap the batched `self`, forward the
// optional `dim` and `keepdim` unchanged to `batch_rule`, and re-wrap the
// result; an unbatched input falls through to the op.
at::Tensor argmin_generated_plumbing(const at::Tensor & self, ::std::optional<int64_t> dim, bool keepdim) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  // Fast path: nothing batched at this level, call the op directly.
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::argmin::call(self, dim, keepdim);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, dim, keepdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
// vmap plumbing for aten::acosh: with FuncTorchBatched excluded from
// dispatch, unwrap `self` at the current vmap level, apply `batch_rule`, and
// re-wrap the (value, bdim) result; an unbatched input falls through to the op.
at::Tensor acosh_generated_plumbing(const at::Tensor & self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  // Fast path: nothing batched at this level, call the op directly.
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::acosh::call(self);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
// vmap plumbing for in-place aten::acosh_: the batch rule mutates the
// unwrapped value, so its return is discarded and the original (still
// wrapped) `self` is returned.
at::Tensor & acosh__generated_plumbing(at::Tensor & self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  // Fast path: nothing batched at this level, call the op directly.
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::acosh_::call(self);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  batch_rule(self_value, self_bdim);
  return self;
}
template <typename batch_rule_t, batch_rule_t batch_rule>
// vmap plumbing for aten::arccosh: with FuncTorchBatched excluded from
// dispatch, unwrap `self` at the current vmap level, apply `batch_rule`, and
// re-wrap the (value, bdim) result; an unbatched input falls through to the op.
at::Tensor arccosh_generated_plumbing(const at::Tensor & self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  // Fast path: nothing batched at this level, call the op directly.
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::arccosh::call(self);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
// vmap plumbing for in-place aten::arccosh_: the batch rule mutates the
// unwrapped value, so its return is discarded and the original (still
// wrapped) `self` is returned.
at::Tensor & arccosh__generated_plumbing(at::Tensor & self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  // Fast path: nothing batched at this level, call the op directly.
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::arccosh_::call(self);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  batch_rule(self_value, self_bdim);
  return self;
}
template <typename batch_rule_t, batch_rule_t batch_rule>
// vmap plumbing for aten::asinh: with FuncTorchBatched excluded from
// dispatch, unwrap `self` at the current vmap level, apply `batch_rule`, and
// re-wrap the (value, bdim) result; an unbatched input falls through to the op.
at::Tensor asinh_generated_plumbing(const at::Tensor & self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  // Fast path: nothing batched at this level, call the op directly.
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::asinh::call(self);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
// vmap plumbing for in-place aten::asinh_: the batch rule mutates the
// unwrapped value, so its return is discarded and the original (still
// wrapped) `self` is returned.
at::Tensor & asinh__generated_plumbing(at::Tensor & self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  // Fast path: nothing batched at this level, call the op directly.
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::asinh_::call(self);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  batch_rule(self_value, self_bdim);
  return self;
}
// vmap plumbing for aten::arcsinh: unwrap a batched `self`, apply batch_rule,
// and re-wrap the result; dispatch to the plain op when not batched here.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor arcsinh_generated_plumbing(const at::Tensor & self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::arcsinh::call(self);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// vmap plumbing for in-place aten::arcsinh_: batch_rule mutates the unwrapped
// value in place, so the original batched `self` handle is returned.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor & arcsinh__generated_plumbing(at::Tensor & self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::arcsinh_::call(self);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  batch_rule(self_value, self_bdim);
  return self;
}
// vmap plumbing for aten::atanh: unwrap a batched `self`, apply batch_rule,
// and re-wrap the result; dispatch to the plain op when not batched here.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor atanh_generated_plumbing(const at::Tensor & self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::atanh::call(self);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// vmap plumbing for in-place aten::atanh_: batch_rule mutates the unwrapped
// value in place, so the original batched `self` handle is returned.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor & atanh__generated_plumbing(at::Tensor & self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::atanh_::call(self);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  batch_rule(self_value, self_bdim);
  return self;
}
// vmap plumbing for aten::arctanh: unwrap a batched `self`, apply batch_rule,
// and re-wrap the result; dispatch to the plain op when not batched here.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor arctanh_generated_plumbing(const at::Tensor & self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::arctanh::call(self);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// vmap plumbing for in-place aten::arctanh_: batch_rule mutates the unwrapped
// value in place, so the original batched `self` handle is returned.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor & arctanh__generated_plumbing(at::Tensor & self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::arctanh_::call(self);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  batch_rule(self_value, self_bdim);
  return self;
}
// vmap plumbing for aten::as_strided: size/stride/storage_offset are passed
// through to batch_rule untouched; only `self` is unwrapped/re-wrapped.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor as_strided_generated_plumbing(const at::Tensor & self, c10::SymIntArrayRef size, c10::SymIntArrayRef stride, ::std::optional<c10::SymInt> storage_offset) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::as_strided::call(self, size, stride, storage_offset);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, size, stride, storage_offset);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// vmap plumbing for aten::asin: unwrap a batched `self`, apply batch_rule,
// and re-wrap the result; dispatch to the plain op when not batched here.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor asin_generated_plumbing(const at::Tensor & self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::asin::call(self);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// vmap plumbing for in-place aten::asin_: batch_rule mutates the unwrapped
// value in place, so the original batched `self` handle is returned.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor & asin__generated_plumbing(at::Tensor & self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::asin_::call(self);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  batch_rule(self_value, self_bdim);
  return self;
}
// vmap plumbing for aten::arcsin: unwrap a batched `self`, apply batch_rule,
// and re-wrap the result; dispatch to the plain op when not batched here.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor arcsin_generated_plumbing(const at::Tensor & self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::arcsin::call(self);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// vmap plumbing for in-place aten::arcsin_: batch_rule mutates the unwrapped
// value in place, so the original batched `self` handle is returned.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor & arcsin__generated_plumbing(at::Tensor & self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::arcsin_::call(self);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  batch_rule(self_value, self_bdim);
  return self;
}
// vmap plumbing for aten::atan: unwrap a batched `self`, apply batch_rule,
// and re-wrap the result; dispatch to the plain op when not batched here.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor atan_generated_plumbing(const at::Tensor & self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::atan::call(self);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// vmap plumbing for in-place aten::atan_: batch_rule mutates the unwrapped
// value in place, so the original batched `self` handle is returned.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor & atan__generated_plumbing(at::Tensor & self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::atan_::call(self);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  batch_rule(self_value, self_bdim);
  return self;
}
// vmap plumbing for aten::arctan: unwrap a batched `self`, apply batch_rule,
// and re-wrap the result; dispatch to the plain op when not batched here.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor arctan_generated_plumbing(const at::Tensor & self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::arctan::call(self);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// vmap plumbing for in-place aten::arctan_: batch_rule mutates the unwrapped
// value in place, so the original batched `self` handle is returned.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor & arctan__generated_plumbing(at::Tensor & self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::arctan_::call(self);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  batch_rule(self_value, self_bdim);
  return self;
}
// vmap plumbing for aten::atleast_1d (single-tensor overload): unwrap a
// batched `self`, apply batch_rule, and re-wrap the result.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor atleast_1d_generated_plumbing(const at::Tensor & self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::atleast_1d::call(self);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// vmap plumbing for aten::atleast_1d.Sequence: the whole TensorList is handed
// to batch_rule (no per-tensor unwrapping here) and the resulting vector of
// (value, bdim) pairs is re-wrapped at the current level.
template <typename batch_rule_t, batch_rule_t batch_rule>
::std::vector<at::Tensor> atleast_1d_Sequence_generated_plumbing(at::TensorList tensors) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(tensors, cur_level)) {
    return at::_ops::atleast_1d_Sequence::call(tensors);
  }

  auto results = batch_rule(tensors);
  return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
}
// vmap plumbing for aten::atleast_2d (single-tensor overload): unwrap a
// batched `self`, apply batch_rule, and re-wrap the result.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor atleast_2d_generated_plumbing(const at::Tensor & self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::atleast_2d::call(self);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// vmap plumbing for aten::atleast_2d.Sequence: the whole TensorList is handed
// to batch_rule and the resulting vector is re-wrapped at the current level.
template <typename batch_rule_t, batch_rule_t batch_rule>
::std::vector<at::Tensor> atleast_2d_Sequence_generated_plumbing(at::TensorList tensors) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(tensors, cur_level)) {
    return at::_ops::atleast_2d_Sequence::call(tensors);
  }

  auto results = batch_rule(tensors);
  return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
}
// vmap plumbing for aten::atleast_3d (single-tensor overload): unwrap a
// batched `self`, apply batch_rule, and re-wrap the result.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor atleast_3d_generated_plumbing(const at::Tensor & self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::atleast_3d::call(self);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// vmap plumbing for aten::atleast_3d.Sequence: the whole TensorList is handed
// to batch_rule and the resulting vector is re-wrapped at the current level.
template <typename batch_rule_t, batch_rule_t batch_rule>
::std::vector<at::Tensor> atleast_3d_Sequence_generated_plumbing(at::TensorList tensors) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(tensors, cur_level)) {
    return at::_ops::atleast_3d_Sequence::call(tensors);
  }

  auto results = batch_rule(tensors);
  return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
}
// vmap plumbing for aten::baddbmm: falls through only when none of the three
// tensor args is batched at this level; otherwise all three are unwrapped
// (unbatched ones yield a nullopt bdim) and passed to batch_rule with the
// scalars forwarded untouched.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor baddbmm_generated_plumbing(const at::Tensor & self, const at::Tensor & batch1, const at::Tensor & batch2, const at::Scalar & beta, const at::Scalar & alpha) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(batch1, cur_level) && !isBatchedAtLevel(batch2, cur_level)) {
    return at::_ops::baddbmm::call(self, batch1, batch2, beta, alpha);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto [batch1_value, batch1_bdim] = unwrapTensorAtLevel(batch1, cur_level);
  auto [batch2_value, batch2_bdim] = unwrapTensorAtLevel(batch2, cur_level);
  auto results = batch_rule(self_value, self_bdim, batch1_value, batch1_bdim, batch2_value, batch2_bdim, beta, alpha);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// vmap plumbing for in-place aten::baddbmm_: unwraps all three tensor args,
// lets batch_rule mutate self's unwrapped value in place, and returns the
// original batched `self` handle.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor & baddbmm__generated_plumbing(at::Tensor & self, const at::Tensor & batch1, const at::Tensor & batch2, const at::Scalar & beta, const at::Scalar & alpha) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(batch1, cur_level) && !isBatchedAtLevel(batch2, cur_level)) {
    return at::_ops::baddbmm_::call(self, batch1, batch2, beta, alpha);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto [batch1_value, batch1_bdim] = unwrapTensorAtLevel(batch1, cur_level);
  auto [batch2_value, batch2_bdim] = unwrapTensorAtLevel(batch2, cur_level);
  batch_rule(self_value, self_bdim, batch1_value, batch1_bdim, batch2_value, batch2_bdim, beta, alpha);
  return self;
}
// vmap plumbing for aten::baddbmm.dtype: same shape as the baddbmm plumbing,
// with the extra `out_dtype` forwarded to batch_rule untouched.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor baddbmm_dtype_generated_plumbing(const at::Tensor & self, const at::Tensor & batch1, const at::Tensor & batch2, at::ScalarType out_dtype, const at::Scalar & beta, const at::Scalar & alpha) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(batch1, cur_level) && !isBatchedAtLevel(batch2, cur_level)) {
    return at::_ops::baddbmm_dtype::call(self, batch1, batch2, out_dtype, beta, alpha);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto [batch1_value, batch1_bdim] = unwrapTensorAtLevel(batch1, cur_level);
  auto [batch2_value, batch2_bdim] = unwrapTensorAtLevel(batch2, cur_level);
  auto results = batch_rule(self_value, self_bdim, batch1_value, batch1_bdim, batch2_value, batch2_bdim, out_dtype, beta, alpha);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// vmap plumbing for aten::batch_norm. The required `input` is unwrapped
// directly; each ::std::optional<Tensor> argument is unwrapped only when it
// holds a tensor, otherwise its (value, bdim) pair stays nullopt so
// batch_rule can tell "absent" apart from "present but unbatched".
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor batch_norm_generated_plumbing(const at::Tensor & input, const ::std::optional<at::Tensor> & weight, const ::std::optional<at::Tensor> & bias, const ::std::optional<at::Tensor> & running_mean, const ::std::optional<at::Tensor> & running_var, bool training, double momentum, double eps, bool cudnn_enabled) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(bias, cur_level) && !isBatchedAtLevel(running_mean, cur_level) && !isBatchedAtLevel(running_var, cur_level)) {
    return at::_ops::batch_norm::call(input, weight, bias, running_mean, running_var, training, momentum, eps, cudnn_enabled);
  }
  auto [input_value, input_bdim] = unwrapTensorAtLevel(input, cur_level);
  std::optional<Tensor> weight_value;
  std::optional<int64_t> weight_bdim;
  if (weight) {
      std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight.value(), cur_level);
  }
  std::optional<Tensor> bias_value;
  std::optional<int64_t> bias_bdim;
  if (bias) {
      std::tie(bias_value, bias_bdim) = unwrapTensorAtLevel(bias.value(), cur_level);
  }
  std::optional<Tensor> running_mean_value;
  std::optional<int64_t> running_mean_bdim;
  if (running_mean) {
      std::tie(running_mean_value, running_mean_bdim) = unwrapTensorAtLevel(running_mean.value(), cur_level);
  }
  std::optional<Tensor> running_var_value;
  std::optional<int64_t> running_var_bdim;
  if (running_var) {
      std::tie(running_var_value, running_var_bdim) = unwrapTensorAtLevel(running_var.value(), cur_level);
  }
  auto results = batch_rule(input_value, input_bdim, weight_value, weight_bdim, bias_value, bias_bdim, running_mean_value, running_mean_bdim, running_var_value, running_var_bdim, training, momentum, eps, cudnn_enabled);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// vmap plumbing for aten::quantized_batch_norm: required tensors (input,
// mean, var) are unwrapped unconditionally; optional weight/bias are
// unwrapped only when present.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor quantized_batch_norm_generated_plumbing(const at::Tensor & input, const ::std::optional<at::Tensor> & weight, const ::std::optional<at::Tensor> & bias, const at::Tensor & mean, const at::Tensor & var, double eps, double output_scale, int64_t output_zero_point) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(bias, cur_level) && !isBatchedAtLevel(mean, cur_level) && !isBatchedAtLevel(var, cur_level)) {
    return at::_ops::quantized_batch_norm::call(input, weight, bias, mean, var, eps, output_scale, output_zero_point);
  }
  auto [input_value, input_bdim] = unwrapTensorAtLevel(input, cur_level);
  auto [mean_value, mean_bdim] = unwrapTensorAtLevel(mean, cur_level);
  auto [var_value, var_bdim] = unwrapTensorAtLevel(var, cur_level);
  std::optional<Tensor> weight_value;
  std::optional<int64_t> weight_bdim;
  if (weight) {
      std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight.value(), cur_level);
  }
  std::optional<Tensor> bias_value;
  std::optional<int64_t> bias_bdim;
  if (bias) {
      std::tie(bias_value, bias_bdim) = unwrapTensorAtLevel(bias.value(), cur_level);
  }
  auto results = batch_rule(input_value, input_bdim, weight_value, weight_bdim, bias_value, bias_bdim, mean_value, mean_bdim, var_value, var_bdim, eps, output_scale, output_zero_point);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// vmap plumbing for aten::_batch_norm_impl_index_backward: unwraps the
// required tensors and each present optional, forwards everything to
// batch_rule, and re-wraps the three (value, bdim) result pairs into a
// tuple of batched tensors.
template <typename batch_rule_t, batch_rule_t batch_rule>
::std::tuple<at::Tensor,at::Tensor,at::Tensor> _batch_norm_impl_index_backward_generated_plumbing(int64_t impl_index, const at::Tensor & input, const at::Tensor & grad_output, const ::std::optional<at::Tensor> & weight, const ::std::optional<at::Tensor> & running_mean, const ::std::optional<at::Tensor> & running_var, const ::std::optional<at::Tensor> & save_mean, const ::std::optional<at::Tensor> & save_var_transform, bool train, double eps, ::std::array<bool,3> output_mask, const at::Tensor & reservedSpace) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(running_mean, cur_level) && !isBatchedAtLevel(running_var, cur_level) && !isBatchedAtLevel(save_mean, cur_level) && !isBatchedAtLevel(save_var_transform, cur_level) && !isBatchedAtLevel(reservedSpace, cur_level)) {
    return at::_ops::_batch_norm_impl_index_backward::call(impl_index, input, grad_output, weight, running_mean, running_var, save_mean, save_var_transform, train, eps, output_mask, reservedSpace);
  }
  auto [input_value, input_bdim] = unwrapTensorAtLevel(input, cur_level);
  auto [grad_output_value, grad_output_bdim] = unwrapTensorAtLevel(grad_output, cur_level);
  auto [reservedSpace_value, reservedSpace_bdim] = unwrapTensorAtLevel(reservedSpace, cur_level);
  std::optional<Tensor> weight_value;
  std::optional<int64_t> weight_bdim;
  if (weight) {
      std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight.value(), cur_level);
  }
  std::optional<Tensor> running_mean_value;
  std::optional<int64_t> running_mean_bdim;
  if (running_mean) {
      std::tie(running_mean_value, running_mean_bdim) = unwrapTensorAtLevel(running_mean.value(), cur_level);
  }
  std::optional<Tensor> running_var_value;
  std::optional<int64_t> running_var_bdim;
  if (running_var) {
      std::tie(running_var_value, running_var_bdim) = unwrapTensorAtLevel(running_var.value(), cur_level);
  }
  std::optional<Tensor> save_mean_value;
  std::optional<int64_t> save_mean_bdim;
  if (save_mean) {
      std::tie(save_mean_value, save_mean_bdim) = unwrapTensorAtLevel(save_mean.value(), cur_level);
  }
  std::optional<Tensor> save_var_transform_value;
  std::optional<int64_t> save_var_transform_bdim;
  if (save_var_transform) {
      std::tie(save_var_transform_value, save_var_transform_bdim) = unwrapTensorAtLevel(save_var_transform.value(), cur_level);
  }
  auto results = batch_rule(impl_index, input_value, input_bdim, grad_output_value, grad_output_bdim, weight_value, weight_bdim, running_mean_value, running_mean_bdim, running_var_value, running_var_bdim, save_mean_value, save_mean_bdim, save_var_transform_value, save_var_transform_bdim, train, eps, output_mask, reservedSpace_value, reservedSpace_bdim);
  return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level));
}
// vmap plumbing for aten::bernoulli: unwrap a batched `self`, forward the
// optional generator untouched, and re-wrap the result.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor bernoulli_generated_plumbing(const at::Tensor & self, ::std::optional<at::Generator> generator) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::bernoulli::call(self, generator);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, generator);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// vmap plumbing for in-place aten::bernoulli_.Tensor: both `self` and the
// probability tensor `p` are unwrapped; batch_rule mutates self's unwrapped
// value in place and the original batched `self` handle is returned.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor & bernoulli__Tensor_generated_plumbing(at::Tensor & self, const at::Tensor & p, ::std::optional<at::Generator> generator) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(p, cur_level)) {
    return at::_ops::bernoulli__Tensor::call(self, p, generator);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto [p_value, p_bdim] = unwrapTensorAtLevel(p, cur_level);
  batch_rule(self_value, self_bdim, p_value, p_bdim, generator);
  return self;
}
// vmap plumbing for in-place aten::bernoulli_.float: the scalar probability
// and optional generator are forwarded untouched; batch_rule mutates self's
// unwrapped value in place and the original batched `self` is returned.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor & bernoulli__float_generated_plumbing(at::Tensor & self, double p, ::std::optional<at::Generator> generator) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::bernoulli__float::call(self, p, generator);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  batch_rule(self_value, self_bdim, p, generator);
  return self;
}
// vmap plumbing for aten::bernoulli.p (out-of-place): unwrap a batched
// `self`, forward the scalar probability and generator, re-wrap the result.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor bernoulli_p_generated_plumbing(const at::Tensor & self, double p, ::std::optional<at::Generator> generator) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::bernoulli_p::call(self, p, generator);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, p, generator);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// vmap plumbing for aten::bilinear: the three required tensors are unwrapped
// unconditionally; the optional bias is unwrapped only when present.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor bilinear_generated_plumbing(const at::Tensor & input1, const at::Tensor & input2, const at::Tensor & weight, const ::std::optional<at::Tensor> & bias) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(input1, cur_level) && !isBatchedAtLevel(input2, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(bias, cur_level)) {
    return at::_ops::bilinear::call(input1, input2, weight, bias);
  }
  auto [input1_value, input1_bdim] = unwrapTensorAtLevel(input1, cur_level);
  auto [input2_value, input2_bdim] = unwrapTensorAtLevel(input2, cur_level);
  auto [weight_value, weight_bdim] = unwrapTensorAtLevel(weight, cur_level);
  std::optional<Tensor> bias_value;
  std::optional<int64_t> bias_bdim;
  if (bias) {
      std::tie(bias_value, bias_bdim) = unwrapTensorAtLevel(bias.value(), cur_level);
  }
  auto results = batch_rule(input1_value, input1_bdim, input2_value, input2_bdim, weight_value, weight_bdim, bias_value, bias_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// vmap plumbing for aten::binary_cross_entropy: self/target unwrapped
// unconditionally, optional weight only when present; reduction forwarded.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor binary_cross_entropy_generated_plumbing(const at::Tensor & self, const at::Tensor & target, const ::std::optional<at::Tensor> & weight, int64_t reduction) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(target, cur_level) && !isBatchedAtLevel(weight, cur_level)) {
    return at::_ops::binary_cross_entropy::call(self, target, weight, reduction);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto [target_value, target_bdim] = unwrapTensorAtLevel(target, cur_level);
  std::optional<Tensor> weight_value;
  std::optional<int64_t> weight_bdim;
  if (weight) {
      std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight.value(), cur_level);
  }
  auto results = batch_rule(self_value, self_bdim, target_value, target_bdim, weight_value, weight_bdim, reduction);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// vmap plumbing for aten::binary_cross_entropy_backward: grad_output, self
// and target unwrapped unconditionally, optional weight only when present.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor binary_cross_entropy_backward_generated_plumbing(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, const ::std::optional<at::Tensor> & weight, int64_t reduction) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(target, cur_level) && !isBatchedAtLevel(weight, cur_level)) {
    return at::_ops::binary_cross_entropy_backward::call(grad_output, self, target, weight, reduction);
  }
  auto [grad_output_value, grad_output_bdim] = unwrapTensorAtLevel(grad_output, cur_level);
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto [target_value, target_bdim] = unwrapTensorAtLevel(target, cur_level);
  std::optional<Tensor> weight_value;
  std::optional<int64_t> weight_bdim;
  if (weight) {
      std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight.value(), cur_level);
  }
  auto results = batch_rule(grad_output_value, grad_output_bdim, self_value, self_bdim, target_value, target_bdim, weight_value, weight_bdim, reduction);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// vmap plumbing for aten::binary_cross_entropy_with_logits: two optional tensor arguments
// (weight, pos_weight) are each unwrapped only when present; unbatched calls fall through.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor binary_cross_entropy_with_logits_generated_plumbing(const at::Tensor & self, const at::Tensor & target, const ::std::optional<at::Tensor> & weight, const ::std::optional<at::Tensor> & pos_weight, int64_t reduction) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(target, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(pos_weight, cur_level)) {
    return at::_ops::binary_cross_entropy_with_logits::call(self, target, weight, pos_weight, reduction);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto [target_value, target_bdim] = unwrapTensorAtLevel(target, cur_level);
  std::optional<Tensor> weight_value;
  std::optional<int64_t> weight_bdim;
  if (weight) {
      std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight.value(), cur_level);
  }
  std::optional<Tensor> pos_weight_value;
  std::optional<int64_t> pos_weight_bdim;
  if (pos_weight) {
      std::tie(pos_weight_value, pos_weight_bdim) = unwrapTensorAtLevel(pos_weight.value(), cur_level);
  }
  auto results = batch_rule(self_value, self_bdim, target_value, target_bdim, weight_value, weight_bdim, pos_weight_value, pos_weight_bdim, reduction);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// vmap plumbing for aten::bincount: unwraps self and the optional weights at the current
// level and dispatches to the batch rule; unbatched calls fall through to the regular op.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor bincount_generated_plumbing(const at::Tensor & self, const ::std::optional<at::Tensor> & weights, c10::SymInt minlength) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(weights, cur_level)) {
    return at::_ops::bincount::call(self, weights, minlength);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  std::optional<Tensor> weights_value;
  std::optional<int64_t> weights_bdim;
  if (weights) {
      std::tie(weights_value, weights_bdim) = unwrapTensorAtLevel(weights.value(), cur_level);
  }
  auto results = batch_rule(self_value, self_bdim, weights_value, weights_bdim, minlength);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// vmap plumbing for aten::bitwise_not (unary, out-of-place): unwrap, apply batch rule,
// re-wrap; fall through when self is not batched at the current level.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor bitwise_not_generated_plumbing(const at::Tensor & self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::bitwise_not::call(self);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// vmap plumbing for aten::bitwise_not_ (in-place): the batch rule mutates the unwrapped
// value; the original wrapper `self` is returned, so no re-wrapping is needed.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor & bitwise_not__generated_plumbing(at::Tensor & self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::bitwise_not_::call(self);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  batch_rule(self_value, self_bdim);
  return self;
}
// vmap plumbing for aten::copysign.Tensor (binary, out-of-place): unwraps both operands
// at the current level; falls through when neither is batched there.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor copysign_Tensor_generated_plumbing(const at::Tensor & self, const at::Tensor & other) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
    return at::_ops::copysign_Tensor::call(self, other);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto [other_value, other_bdim] = unwrapTensorAtLevel(other, cur_level);
  auto results = batch_rule(self_value, self_bdim, other_value, other_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// vmap plumbing for aten::copysign_.Tensor (in-place): batch rule mutates the unwrapped
// self; the original wrapper is returned unchanged.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor & copysign__Tensor_generated_plumbing(at::Tensor & self, const at::Tensor & other) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
    return at::_ops::copysign__Tensor::call(self, other);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto [other_value, other_bdim] = unwrapTensorAtLevel(other, cur_level);
  batch_rule(self_value, self_bdim, other_value, other_bdim);
  return self;
}
// vmap plumbing for aten::copysign.Scalar: only self can be batched; the Scalar argument
// is forwarded to the batch rule as-is.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor copysign_Scalar_generated_plumbing(const at::Tensor & self, const at::Scalar & other) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::copysign_Scalar::call(self, other);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, other);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// vmap plumbing for aten::copysign_.Scalar (in-place): mutates the unwrapped self via the
// batch rule and returns the original wrapper.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor & copysign__Scalar_generated_plumbing(at::Tensor & self, const at::Scalar & other) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::copysign__Scalar::call(self, other);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  batch_rule(self_value, self_bdim, other);
  return self;
}
// vmap plumbing for aten::_lazy_clone (unary, out-of-place): standard unwrap / batch-rule /
// re-wrap pattern with fall-through when self is not batched at the current level.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor _lazy_clone_generated_plumbing(const at::Tensor & self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::_lazy_clone::call(self);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// vmap plumbing for aten::logical_not (unary, out-of-place).
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor logical_not_generated_plumbing(const at::Tensor & self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::logical_not::call(self);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// vmap plumbing for aten::logical_not_ (in-place): batch rule mutates the unwrapped value;
// the original wrapper `self` is returned.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor & logical_not__generated_plumbing(at::Tensor & self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::logical_not_::call(self);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  batch_rule(self_value, self_bdim);
  return self;
}
// vmap plumbing for aten::logical_xor (binary, out-of-place).
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor logical_xor_generated_plumbing(const at::Tensor & self, const at::Tensor & other) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
    return at::_ops::logical_xor::call(self, other);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto [other_value, other_bdim] = unwrapTensorAtLevel(other, cur_level);
  auto results = batch_rule(self_value, self_bdim, other_value, other_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// vmap plumbing for aten::logical_xor_ (in-place): mutates the unwrapped self and returns
// the original wrapper.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor & logical_xor__generated_plumbing(at::Tensor & self, const at::Tensor & other) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
    return at::_ops::logical_xor_::call(self, other);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto [other_value, other_bdim] = unwrapTensorAtLevel(other, cur_level);
  batch_rule(self_value, self_bdim, other_value, other_bdim);
  return self;
}
// vmap plumbing for aten::logical_and (binary, out-of-place).
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor logical_and_generated_plumbing(const at::Tensor & self, const at::Tensor & other) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
    return at::_ops::logical_and::call(self, other);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto [other_value, other_bdim] = unwrapTensorAtLevel(other, cur_level);
  auto results = batch_rule(self_value, self_bdim, other_value, other_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// vmap plumbing for aten::logical_and_ (in-place): mutates the unwrapped self and returns
// the original wrapper.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor & logical_and__generated_plumbing(at::Tensor & self, const at::Tensor & other) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
    return at::_ops::logical_and_::call(self, other);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto [other_value, other_bdim] = unwrapTensorAtLevel(other, cur_level);
  batch_rule(self_value, self_bdim, other_value, other_bdim);
  return self;
}
// vmap plumbing for aten::logical_or (binary, out-of-place).
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor logical_or_generated_plumbing(const at::Tensor & self, const at::Tensor & other) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
    return at::_ops::logical_or::call(self, other);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto [other_value, other_bdim] = unwrapTensorAtLevel(other, cur_level);
  auto results = batch_rule(self_value, self_bdim, other_value, other_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// vmap plumbing for aten::logical_or_ (in-place): mutates the unwrapped self and returns
// the original wrapper.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor & logical_or__generated_plumbing(at::Tensor & self, const at::Tensor & other) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
    return at::_ops::logical_or_::call(self, other);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto [other_value, other_bdim] = unwrapTensorAtLevel(other, cur_level);
  batch_rule(self_value, self_bdim, other_value, other_bdim);
  return self;
}
// vmap plumbing for aten::bmm (binary, out-of-place).
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor bmm_generated_plumbing(const at::Tensor & self, const at::Tensor & mat2) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(mat2, cur_level)) {
    return at::_ops::bmm::call(self, mat2);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto [mat2_value, mat2_bdim] = unwrapTensorAtLevel(mat2, cur_level);
  auto results = batch_rule(self_value, self_bdim, mat2_value, mat2_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// vmap plumbing for aten::bmm.dtype: like bmm, with the out_dtype forwarded unchanged.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor bmm_dtype_generated_plumbing(const at::Tensor & self, const at::Tensor & mat2, at::ScalarType out_dtype) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(mat2, cur_level)) {
    return at::_ops::bmm_dtype::call(self, mat2, out_dtype);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto [mat2_value, mat2_bdim] = unwrapTensorAtLevel(mat2, cur_level);
  auto results = batch_rule(self_value, self_bdim, mat2_value, mat2_bdim, out_dtype);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// vmap plumbing for aten::broadcast_tensors: the TensorList is passed to the batch rule
// still wrapped (the rule does its own per-tensor unwrapping); results are re-wrapped as a
// vector at the current level.
template <typename batch_rule_t, batch_rule_t batch_rule>
::std::vector<at::Tensor> broadcast_tensors_generated_plumbing(at::TensorList tensors) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(tensors, cur_level)) {
    return at::_ops::broadcast_tensors::call(tensors);
  }

  auto results = batch_rule(tensors);
  return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
}
// vmap plumbing for aten::broadcast_to: unary op with a SymInt size argument forwarded
// unchanged.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor broadcast_to_generated_plumbing(const at::Tensor & self, c10::SymIntArrayRef size) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::broadcast_to::call(self, size);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, size);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// vmap plumbing for aten::_sparse_broadcast_to: same shape as broadcast_to but with a
// plain IntArrayRef size.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor _sparse_broadcast_to_generated_plumbing(const at::Tensor & self, at::IntArrayRef size) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::_sparse_broadcast_to::call(self, size);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, size);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// vmap plumbing for aten::cat: the ITensorListRef is handed to the batch rule still
// wrapped; only the single output tensor is re-wrapped here.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor cat_generated_plumbing(const at::ITensorListRef & tensors, int64_t dim) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(tensors, cur_level)) {
    return at::_ops::cat::call(tensors, dim);
  }

  auto results = batch_rule(tensors, dim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// vmap plumbing for aten::cat.names (Dimname overload): list passed through wrapped.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor cat_names_generated_plumbing(at::TensorList tensors, at::Dimname dim) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(tensors, cur_level)) {
    return at::_ops::cat_names::call(tensors, dim);
  }

  auto results = batch_rule(tensors, dim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// vmap plumbing for aten::concat (alias of cat): list passed through wrapped.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor concat_generated_plumbing(at::TensorList tensors, int64_t dim) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(tensors, cur_level)) {
    return at::_ops::concat::call(tensors, dim);
  }

  auto results = batch_rule(tensors, dim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// vmap plumbing for aten::concat.names (Dimname overload): list passed through wrapped.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor concat_names_generated_plumbing(at::TensorList tensors, at::Dimname dim) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(tensors, cur_level)) {
    return at::_ops::concat_names::call(tensors, dim);
  }

  auto results = batch_rule(tensors, dim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// vmap plumbing for aten::concatenate (alias of cat): list passed through wrapped.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor concatenate_generated_plumbing(at::TensorList tensors, int64_t dim) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(tensors, cur_level)) {
    return at::_ops::concatenate::call(tensors, dim);
  }

  auto results = batch_rule(tensors, dim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// vmap plumbing for aten::concatenate.names (Dimname overload): list passed through wrapped.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor concatenate_names_generated_plumbing(at::TensorList tensors, at::Dimname dim) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(tensors, cur_level)) {
    return at::_ops::concatenate_names::call(tensors, dim);
  }

  auto results = batch_rule(tensors, dim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// vmap plumbing for aten::block_diag: list passed through wrapped; single output re-wrapped.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor block_diag_generated_plumbing(at::TensorList tensors) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(tensors, cur_level)) {
    return at::_ops::block_diag::call(tensors);
  }

  auto results = batch_rule(tensors);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// vmap plumbing for aten::ceil (unary, out-of-place).
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor ceil_generated_plumbing(const at::Tensor & self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::ceil::call(self);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// vmap plumbing for aten::ceil_ (in-place): mutates the unwrapped self and returns the
// original wrapper.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor & ceil__generated_plumbing(at::Tensor & self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::ceil_::call(self);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  batch_rule(self_value, self_bdim);
  return self;
}
// vmap plumbing for aten::chain_matmul: list passed through wrapped; single output
// re-wrapped at the current level.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor chain_matmul_generated_plumbing(at::TensorList matrices) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(matrices, cur_level)) {
    return at::_ops::chain_matmul::call(matrices);
  }

  auto results = batch_rule(matrices);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// vmap plumbing for aten::unsafe_chunk: returns multiple tensors, so the batch rule's
// output vector is re-wrapped with makeBatchedVector.
template <typename batch_rule_t, batch_rule_t batch_rule>
::std::vector<at::Tensor> unsafe_chunk_generated_plumbing(const at::Tensor & self, int64_t chunks, int64_t dim) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::unsafe_chunk::call(self, chunks, dim);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, chunks, dim);
  return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
}
// vmap plumbing for aten::chunk: multi-output, re-wrapped with makeBatchedVector.
template <typename batch_rule_t, batch_rule_t batch_rule>
::std::vector<at::Tensor> chunk_generated_plumbing(const at::Tensor & self, int64_t chunks, int64_t dim) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::chunk::call(self, chunks, dim);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, chunks, dim);
  return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
}
// vmap plumbing for aten::tensor_split.sections: multi-output, re-wrapped with
// makeBatchedVector; SymInt sections forwarded unchanged.
template <typename batch_rule_t, batch_rule_t batch_rule>
::std::vector<at::Tensor> tensor_split_sections_generated_plumbing(const at::Tensor & self, c10::SymInt sections, int64_t dim) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::tensor_split_sections::call(self, sections, dim);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, sections, dim);
  return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
}
// vmap plumbing for aten::tensor_split.indices: multi-output, re-wrapped with
// makeBatchedVector; SymInt indices forwarded unchanged.
template <typename batch_rule_t, batch_rule_t batch_rule>
::std::vector<at::Tensor> tensor_split_indices_generated_plumbing(const at::Tensor & self, c10::SymIntArrayRef indices, int64_t dim) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::tensor_split_indices::call(self, indices, dim);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, indices, dim);
  return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
}
// vmap plumbing for aten::tensor_split.tensor_indices_or_sections: both tensor arguments
// are unwrapped; multi-output, re-wrapped with makeBatchedVector.
template <typename batch_rule_t, batch_rule_t batch_rule>
::std::vector<at::Tensor> tensor_split_tensor_indices_or_sections_generated_plumbing(const at::Tensor & self, const at::Tensor & tensor_indices_or_sections, int64_t dim) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(tensor_indices_or_sections, cur_level)) {
    return at::_ops::tensor_split_tensor_indices_or_sections::call(self, tensor_indices_or_sections, dim);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto [tensor_indices_or_sections_value, tensor_indices_or_sections_bdim] = unwrapTensorAtLevel(tensor_indices_or_sections, cur_level);
  auto results = batch_rule(self_value, self_bdim, tensor_indices_or_sections_value, tensor_indices_or_sections_bdim, dim);
  return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
}
// vmap plumbing for aten::clamp (Scalar min/max overload): only self can be batched; the
// optional Scalars are forwarded unchanged.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor clamp_generated_plumbing(const at::Tensor & self, const ::std::optional<at::Scalar> & min, const ::std::optional<at::Scalar> & max) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::clamp::call(self, min, max);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, min, max);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// vmap plumbing for aten::clamp.Tensor: self plus two optional tensor bounds (min, max),
// each unwrapped only when present; unbatched calls fall through to the regular op.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor clamp_Tensor_generated_plumbing(const at::Tensor & self, const ::std::optional<at::Tensor> & min, const ::std::optional<at::Tensor> & max) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(min, cur_level) && !isBatchedAtLevel(max, cur_level)) {
    return at::_ops::clamp_Tensor::call(self, min, max);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  std::optional<Tensor> min_value;
  std::optional<int64_t> min_bdim;
  if (min) {
      std::tie(min_value, min_bdim) = unwrapTensorAtLevel(min.value(), cur_level);
  }
  std::optional<Tensor> max_value;
  std::optional<int64_t> max_bdim;
  if (max) {
      std::tie(max_value, max_bdim) = unwrapTensorAtLevel(max.value(), cur_level);
  }
  auto results = batch_rule(self_value, self_bdim, min_value, min_bdim, max_value, max_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// Generated vmap plumbing for aten::clamp_ (in-place): the batch rule's result
// is discarded and `self` is returned, per the in-place convention.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor & clamp__generated_plumbing(at::Tensor & self, const ::std::optional<at::Scalar> & min, const ::std::optional<at::Scalar> & max) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  // Fast path: nothing batched at this level.
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::clamp_::call(self, min, max);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  batch_rule(self_value, self_bdim, min, max);
  return self;
}
// Generated vmap plumbing for aten::clamp_.Tensor (in-place, tensor min/max).
// Optionals are unwrapped only when present; the batch rule's result is
// discarded and `self` is returned.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor & clamp__Tensor_generated_plumbing(at::Tensor & self, const ::std::optional<at::Tensor> & min, const ::std::optional<at::Tensor> & max) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  // Fast path: no input is batched at this level.
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(min, cur_level) && !isBatchedAtLevel(max, cur_level)) {
    return at::_ops::clamp__Tensor::call(self, min, max);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  std::optional<Tensor> min_value;
  std::optional<int64_t> min_bdim;
  if (min) {
      std::tie(min_value, min_bdim) = unwrapTensorAtLevel(min.value(), cur_level);
  }
  std::optional<Tensor> max_value;
  std::optional<int64_t> max_bdim;
  if (max) {
      std::tie(max_value, max_bdim) = unwrapTensorAtLevel(max.value(), cur_level);
  }
  batch_rule(self_value, self_bdim, min_value, min_bdim, max_value, max_bdim);
  return self;
}
// Generated vmap plumbing for aten::clamp_max (scalar max): unwrap, apply the
// batch rule, re-wrap at the current vmap level.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor clamp_max_generated_plumbing(const at::Tensor & self, const at::Scalar & max) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  // Fast path: nothing batched at this level.
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::clamp_max::call(self, max);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, max);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// Generated vmap plumbing for aten::clamp_max.Tensor (tensor max): both tensor
// inputs are unwrapped at the current level before calling the batch rule.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor clamp_max_Tensor_generated_plumbing(const at::Tensor & self, const at::Tensor & max) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  // Fast path: no input is batched at this level.
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(max, cur_level)) {
    return at::_ops::clamp_max_Tensor::call(self, max);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto [max_value, max_bdim] = unwrapTensorAtLevel(max, cur_level);
  auto results = batch_rule(self_value, self_bdim, max_value, max_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// Generated vmap plumbing for aten::clamp_max_ (in-place, scalar max): the
// batch rule's result is discarded and `self` is returned.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor & clamp_max__generated_plumbing(at::Tensor & self, const at::Scalar & max) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  // Fast path: nothing batched at this level.
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::clamp_max_::call(self, max);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  batch_rule(self_value, self_bdim, max);
  return self;
}
// Generated vmap plumbing for aten::clamp_max_.Tensor (in-place, tensor max):
// both tensors unwrapped; the batch rule's result is discarded, `self` returned.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor & clamp_max__Tensor_generated_plumbing(at::Tensor & self, const at::Tensor & max) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  // Fast path: no input is batched at this level.
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(max, cur_level)) {
    return at::_ops::clamp_max__Tensor::call(self, max);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto [max_value, max_bdim] = unwrapTensorAtLevel(max, cur_level);
  batch_rule(self_value, self_bdim, max_value, max_bdim);
  return self;
}
// Generated vmap plumbing for aten::clamp_min (scalar min): unwrap, apply the
// batch rule, re-wrap at the current vmap level.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor clamp_min_generated_plumbing(const at::Tensor & self, const at::Scalar & min) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  // Fast path: nothing batched at this level.
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::clamp_min::call(self, min);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, min);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// Generated vmap plumbing for aten::clamp_min.Tensor (tensor min): both tensor
// inputs are unwrapped at the current level before calling the batch rule.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor clamp_min_Tensor_generated_plumbing(const at::Tensor & self, const at::Tensor & min) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  // Fast path: no input is batched at this level.
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(min, cur_level)) {
    return at::_ops::clamp_min_Tensor::call(self, min);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto [min_value, min_bdim] = unwrapTensorAtLevel(min, cur_level);
  auto results = batch_rule(self_value, self_bdim, min_value, min_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// Generated vmap plumbing for aten::clamp_min_ (in-place, scalar min): the
// batch rule's result is discarded and `self` is returned.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor & clamp_min__generated_plumbing(at::Tensor & self, const at::Scalar & min) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  // Fast path: nothing batched at this level.
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::clamp_min_::call(self, min);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  batch_rule(self_value, self_bdim, min);
  return self;
}
// Generated vmap plumbing for aten::clamp_min_.Tensor (in-place, tensor min):
// both tensors unwrapped; the batch rule's result is discarded, `self` returned.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor & clamp_min__Tensor_generated_plumbing(at::Tensor & self, const at::Tensor & min) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  // Fast path: no input is batched at this level.
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(min, cur_level)) {
    return at::_ops::clamp_min__Tensor::call(self, min);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto [min_value, min_bdim] = unwrapTensorAtLevel(min, cur_level);
  batch_rule(self_value, self_bdim, min_value, min_bdim);
  return self;
}
// Generated vmap plumbing for aten::clip (scalar bounds): unwrap, apply the
// batch rule, re-wrap at the current vmap level.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor clip_generated_plumbing(const at::Tensor & self, const ::std::optional<at::Scalar> & min, const ::std::optional<at::Scalar> & max) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  // Fast path: nothing batched at this level.
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::clip::call(self, min, max);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, min, max);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// Generated vmap plumbing for aten::clip.Tensor (tensor bounds): optional
// tensor arguments are unwrapped only when present.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor clip_Tensor_generated_plumbing(const at::Tensor & self, const ::std::optional<at::Tensor> & min, const ::std::optional<at::Tensor> & max) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  // Fast path: no input is batched at this level.
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(min, cur_level) && !isBatchedAtLevel(max, cur_level)) {
    return at::_ops::clip_Tensor::call(self, min, max);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  std::optional<Tensor> min_value;
  std::optional<int64_t> min_bdim;
  if (min) {
      std::tie(min_value, min_bdim) = unwrapTensorAtLevel(min.value(), cur_level);
  }
  std::optional<Tensor> max_value;
  std::optional<int64_t> max_bdim;
  if (max) {
      std::tie(max_value, max_bdim) = unwrapTensorAtLevel(max.value(), cur_level);
  }
  auto results = batch_rule(self_value, self_bdim, min_value, min_bdim, max_value, max_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// Generated vmap plumbing for aten::clip_ (in-place, scalar bounds): the batch
// rule's result is discarded and `self` is returned.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor & clip__generated_plumbing(at::Tensor & self, const ::std::optional<at::Scalar> & min, const ::std::optional<at::Scalar> & max) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  // Fast path: nothing batched at this level.
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::clip_::call(self, min, max);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  batch_rule(self_value, self_bdim, min, max);
  return self;
}
// Generated vmap plumbing for aten::clip_.Tensor (in-place, tensor bounds):
// optionals unwrapped only when present; batch rule result discarded.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor & clip__Tensor_generated_plumbing(at::Tensor & self, const ::std::optional<at::Tensor> & min, const ::std::optional<at::Tensor> & max) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  // Fast path: no input is batched at this level.
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(min, cur_level) && !isBatchedAtLevel(max, cur_level)) {
    return at::_ops::clip__Tensor::call(self, min, max);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  std::optional<Tensor> min_value;
  std::optional<int64_t> min_bdim;
  if (min) {
      std::tie(min_value, min_bdim) = unwrapTensorAtLevel(min.value(), cur_level);
  }
  std::optional<Tensor> max_value;
  std::optional<int64_t> max_bdim;
  if (max) {
      std::tie(max_value, max_bdim) = unwrapTensorAtLevel(max.value(), cur_level);
  }
  batch_rule(self_value, self_bdim, min_value, min_bdim, max_value, max_bdim);
  return self;
}
// Generated vmap plumbing for aten::complex: both inputs unwrapped at the
// current level, result re-wrapped as a batched tensor.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor complex_generated_plumbing(const at::Tensor & real, const at::Tensor & imag) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  // Fast path: no input is batched at this level.
  if (!isBatchedAtLevel(real, cur_level) && !isBatchedAtLevel(imag, cur_level)) {
    return at::_ops::complex::call(real, imag);
  }
  auto [real_value, real_bdim] = unwrapTensorAtLevel(real, cur_level);
  auto [imag_value, imag_bdim] = unwrapTensorAtLevel(imag, cur_level);
  auto results = batch_rule(real_value, real_bdim, imag_value, imag_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// Generated vmap plumbing for aten::polar: both inputs unwrapped at the
// current level, result re-wrapped as a batched tensor.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor polar_generated_plumbing(const at::Tensor & abs, const at::Tensor & angle) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  // Fast path: no input is batched at this level.
  if (!isBatchedAtLevel(abs, cur_level) && !isBatchedAtLevel(angle, cur_level)) {
    return at::_ops::polar::call(abs, angle);
  }
  auto [abs_value, abs_bdim] = unwrapTensorAtLevel(abs, cur_level);
  auto [angle_value, angle_bdim] = unwrapTensorAtLevel(angle, cur_level);
  auto results = batch_rule(abs_value, abs_bdim, angle_value, angle_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// Generated vmap plumbing for aten::constant_pad_nd: non-tensor args (pad,
// value) are forwarded to the batch rule untouched.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor constant_pad_nd_generated_plumbing(const at::Tensor & self, c10::SymIntArrayRef pad, const at::Scalar & value) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  // Fast path: nothing batched at this level.
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::constant_pad_nd::call(self, pad, value);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, pad, value);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// Generated vmap plumbing for aten::contiguous: memory_format is forwarded to
// the batch rule untouched.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor contiguous_generated_plumbing(const at::Tensor & self, at::MemoryFormat memory_format) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  // Fast path: nothing batched at this level.
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::contiguous::call(self, memory_format);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, memory_format);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// Generated vmap plumbing for aten::convolution: input/weight are unwrapped
// unconditionally, the optional bias only when present; conv hyperparameters
// pass through untouched.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor convolution_generated_plumbing(const at::Tensor & input, const at::Tensor & weight, const ::std::optional<at::Tensor> & bias, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, c10::SymIntArrayRef dilation, bool transposed, c10::SymIntArrayRef output_padding, c10::SymInt groups) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  // Fast path: no tensor input is batched at this level.
  if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(bias, cur_level)) {
    return at::_ops::convolution::call(input, weight, bias, stride, padding, dilation, transposed, output_padding, groups);
  }
  auto [input_value, input_bdim] = unwrapTensorAtLevel(input, cur_level);
  auto [weight_value, weight_bdim] = unwrapTensorAtLevel(weight, cur_level);
  std::optional<Tensor> bias_value;
  std::optional<int64_t> bias_bdim;
  if (bias) {
      std::tie(bias_value, bias_bdim) = unwrapTensorAtLevel(bias.value(), cur_level);
  }
  auto results = batch_rule(input_value, input_bdim, weight_value, weight_bdim, bias_value, bias_bdim, stride, padding, dilation, transposed, output_padding, groups);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// Generated vmap plumbing for aten::convolution_backward: unwraps the three
// tensor inputs and re-wraps each of the three (value, bdim) output pairs.
template <typename batch_rule_t, batch_rule_t batch_rule>
::std::tuple<at::Tensor,at::Tensor,at::Tensor> convolution_backward_generated_plumbing(const at::Tensor & grad_output, const at::Tensor & input, const at::Tensor & weight, at::OptionalSymIntArrayRef bias_sizes, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, c10::SymIntArrayRef dilation, bool transposed, c10::SymIntArrayRef output_padding, c10::SymInt groups, ::std::array<bool,3> output_mask) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  // Fast path: no tensor input is batched at this level.
  if (!isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(weight, cur_level)) {
    return at::_ops::convolution_backward::call(grad_output, input, weight, bias_sizes, stride, padding, dilation, transposed, output_padding, groups, output_mask);
  }
  auto [grad_output_value, grad_output_bdim] = unwrapTensorAtLevel(grad_output, cur_level);
  auto [input_value, input_bdim] = unwrapTensorAtLevel(input, cur_level);
  auto [weight_value, weight_bdim] = unwrapTensorAtLevel(weight, cur_level);
  auto results = batch_rule(grad_output_value, grad_output_bdim, input_value, input_bdim, weight_value, weight_bdim, bias_sizes, stride, padding, dilation, transposed, output_padding, groups, output_mask);
  // Re-wrap each of the three gradient outputs at the current level.
  return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level));
}
// Generated vmap plumbing for aten::convolution_overrideable: same shape as
// the convolution plumbing; optional bias unwrapped only when present.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor convolution_overrideable_generated_plumbing(const at::Tensor & input, const at::Tensor & weight, const ::std::optional<at::Tensor> & bias, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, c10::SymIntArrayRef dilation, bool transposed, c10::SymIntArrayRef output_padding, c10::SymInt groups) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  // Fast path: no tensor input is batched at this level.
  if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(bias, cur_level)) {
    return at::_ops::convolution_overrideable::call(input, weight, bias, stride, padding, dilation, transposed, output_padding, groups);
  }
  auto [input_value, input_bdim] = unwrapTensorAtLevel(input, cur_level);
  auto [weight_value, weight_bdim] = unwrapTensorAtLevel(weight, cur_level);
  std::optional<Tensor> bias_value;
  std::optional<int64_t> bias_bdim;
  if (bias) {
      std::tie(bias_value, bias_bdim) = unwrapTensorAtLevel(bias.value(), cur_level);
  }
  auto results = batch_rule(input_value, input_bdim, weight_value, weight_bdim, bias_value, bias_bdim, stride, padding, dilation, transposed, output_padding, groups);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// Generated vmap plumbing for aten::convolution_backward_overrideable: unwraps
// the three tensor inputs and re-wraps the three output pairs.
template <typename batch_rule_t, batch_rule_t batch_rule>
::std::tuple<at::Tensor,at::Tensor,at::Tensor> convolution_backward_overrideable_generated_plumbing(const at::Tensor & grad_output, const at::Tensor & input, const at::Tensor & weight, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, c10::SymIntArrayRef dilation, bool transposed, c10::SymIntArrayRef output_padding, c10::SymInt groups, ::std::array<bool,3> output_mask) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  // Fast path: no tensor input is batched at this level.
  if (!isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(weight, cur_level)) {
    return at::_ops::convolution_backward_overrideable::call(grad_output, input, weight, stride, padding, dilation, transposed, output_padding, groups, output_mask);
  }
  auto [grad_output_value, grad_output_bdim] = unwrapTensorAtLevel(grad_output, cur_level);
  auto [input_value, input_bdim] = unwrapTensorAtLevel(input, cur_level);
  auto [weight_value, weight_bdim] = unwrapTensorAtLevel(weight, cur_level);
  auto results = batch_rule(grad_output_value, grad_output_bdim, input_value, input_bdim, weight_value, weight_bdim, stride, padding, dilation, transposed, output_padding, groups, output_mask);
  // Re-wrap each of the three gradient outputs at the current level.
  return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level));
}
// Generated vmap plumbing for aten::_convolution (full-flag variant):
// tensor inputs are unwrapped (bias only when present); backend flags
// (benchmark/deterministic/cudnn_enabled/allow_tf32) pass through untouched.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor _convolution_generated_plumbing(const at::Tensor & input, const at::Tensor & weight, const ::std::optional<at::Tensor> & bias, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, c10::SymIntArrayRef dilation, bool transposed, c10::SymIntArrayRef output_padding, c10::SymInt groups, bool benchmark, bool deterministic, bool cudnn_enabled, bool allow_tf32) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  // Fast path: no tensor input is batched at this level.
  if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(bias, cur_level)) {
    return at::_ops::_convolution::call(input, weight, bias, stride, padding, dilation, transposed, output_padding, groups, benchmark, deterministic, cudnn_enabled, allow_tf32);
  }
  auto [input_value, input_bdim] = unwrapTensorAtLevel(input, cur_level);
  auto [weight_value, weight_bdim] = unwrapTensorAtLevel(weight, cur_level);
  std::optional<Tensor> bias_value;
  std::optional<int64_t> bias_bdim;
  if (bias) {
      std::tie(bias_value, bias_bdim) = unwrapTensorAtLevel(bias.value(), cur_level);
  }
  auto results = batch_rule(input_value, input_bdim, weight_value, weight_bdim, bias_value, bias_bdim, stride, padding, dilation, transposed, output_padding, groups, benchmark, deterministic, cudnn_enabled, allow_tf32);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// Generated vmap plumbing for the deprecated aten::_convolution overload
// (no allow_tf32 flag); otherwise identical in shape to _convolution.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor _convolution_deprecated_generated_plumbing(const at::Tensor & input, const at::Tensor & weight, const ::std::optional<at::Tensor> & bias, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, c10::SymIntArrayRef dilation, bool transposed, at::IntArrayRef output_padding, c10::SymInt groups, bool benchmark, bool deterministic, bool cudnn_enabled) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  // Fast path: no tensor input is batched at this level.
  if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(bias, cur_level)) {
    return at::_ops::_convolution_deprecated::call(input, weight, bias, stride, padding, dilation, transposed, output_padding, groups, benchmark, deterministic, cudnn_enabled);
  }
  auto [input_value, input_bdim] = unwrapTensorAtLevel(input, cur_level);
  auto [weight_value, weight_bdim] = unwrapTensorAtLevel(weight, cur_level);
  std::optional<Tensor> bias_value;
  std::optional<int64_t> bias_bdim;
  if (bias) {
      std::tie(bias_value, bias_bdim) = unwrapTensorAtLevel(bias.value(), cur_level);
  }
  auto results = batch_rule(input_value, input_bdim, weight_value, weight_bdim, bias_value, bias_bdim, stride, padding, dilation, transposed, output_padding, groups, benchmark, deterministic, cudnn_enabled);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// Generated vmap plumbing for aten::_convolution_mode (string padding mode,
// e.g. the padding argument is a string_view, not sizes).
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor _convolution_mode_generated_plumbing(const at::Tensor & input, const at::Tensor & weight, const ::std::optional<at::Tensor> & bias, c10::SymIntArrayRef stride, c10::string_view padding, c10::SymIntArrayRef dilation, c10::SymInt groups) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  // Fast path: no tensor input is batched at this level.
  if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(bias, cur_level)) {
    return at::_ops::_convolution_mode::call(input, weight, bias, stride, padding, dilation, groups);
  }
  auto [input_value, input_bdim] = unwrapTensorAtLevel(input, cur_level);
  auto [weight_value, weight_bdim] = unwrapTensorAtLevel(weight, cur_level);
  std::optional<Tensor> bias_value;
  std::optional<int64_t> bias_bdim;
  if (bias) {
      std::tie(bias_value, bias_bdim) = unwrapTensorAtLevel(bias.value(), cur_level);
  }
  auto results = batch_rule(input_value, input_bdim, weight_value, weight_bdim, bias_value, bias_bdim, stride, padding, dilation, groups);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// Generated vmap plumbing for aten::_convolution_double_backward: the three
// required tensors (gO, weight, self) are unwrapped unconditionally, the three
// optional grad-grad tensors (ggI, ggW, ggb) only when present; the three
// outputs are re-wrapped at the current level.
template <typename batch_rule_t, batch_rule_t batch_rule>
::std::tuple<at::Tensor,at::Tensor,at::Tensor> _convolution_double_backward_generated_plumbing(const ::std::optional<at::Tensor> & ggI, const ::std::optional<at::Tensor> & ggW, const ::std::optional<at::Tensor> & ggb, const at::Tensor & gO, const at::Tensor & weight, const at::Tensor & self, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, c10::SymIntArrayRef dilation, bool transposed, c10::SymIntArrayRef output_padding, c10::SymInt groups, ::std::array<bool,3> output_mask) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  // Fast path: no tensor input is batched at this level.
  if (!isBatchedAtLevel(ggI, cur_level) && !isBatchedAtLevel(ggW, cur_level) && !isBatchedAtLevel(ggb, cur_level) && !isBatchedAtLevel(gO, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(self, cur_level)) {
    return at::_ops::_convolution_double_backward::call(ggI, ggW, ggb, gO, weight, self, stride, padding, dilation, transposed, output_padding, groups, output_mask);
  }
  auto [gO_value, gO_bdim] = unwrapTensorAtLevel(gO, cur_level);
  auto [weight_value, weight_bdim] = unwrapTensorAtLevel(weight, cur_level);
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  std::optional<Tensor> ggI_value;
  std::optional<int64_t> ggI_bdim;
  if (ggI) {
      std::tie(ggI_value, ggI_bdim) = unwrapTensorAtLevel(ggI.value(), cur_level);
  }
  std::optional<Tensor> ggW_value;
  std::optional<int64_t> ggW_bdim;
  if (ggW) {
      std::tie(ggW_value, ggW_bdim) = unwrapTensorAtLevel(ggW.value(), cur_level);
  }
  std::optional<Tensor> ggb_value;
  std::optional<int64_t> ggb_bdim;
  if (ggb) {
      std::tie(ggb_value, ggb_bdim) = unwrapTensorAtLevel(ggb.value(), cur_level);
  }
  auto results = batch_rule(ggI_value, ggI_bdim, ggW_value, ggW_bdim, ggb_value, ggb_bdim, gO_value, gO_bdim, weight_value, weight_bdim, self_value, self_bdim, stride, padding, dilation, transposed, output_padding, groups, output_mask);
  // Re-wrap each of the three outputs at the current level.
  return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level));
}
// Generated vmap plumbing for aten::conv1d: tensor inputs unwrapped (optional
// bias only when present), conv hyperparameters forwarded untouched.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor conv1d_generated_plumbing(const at::Tensor & input, const at::Tensor & weight, const ::std::optional<at::Tensor> & bias, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, c10::SymIntArrayRef dilation, c10::SymInt groups) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  // Fast path: no tensor input is batched at this level.
  if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(bias, cur_level)) {
    return at::_ops::conv1d::call(input, weight, bias, stride, padding, dilation, groups);
  }
  auto [input_value, input_bdim] = unwrapTensorAtLevel(input, cur_level);
  auto [weight_value, weight_bdim] = unwrapTensorAtLevel(weight, cur_level);
  std::optional<Tensor> bias_value;
  std::optional<int64_t> bias_bdim;
  if (bias) {
      std::tie(bias_value, bias_bdim) = unwrapTensorAtLevel(bias.value(), cur_level);
  }
  auto results = batch_rule(input_value, input_bdim, weight_value, weight_bdim, bias_value, bias_bdim, stride, padding, dilation, groups);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// Generated vmap plumbing for aten::conv2d: tensor inputs unwrapped (optional
// bias only when present), conv hyperparameters forwarded untouched.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor conv2d_generated_plumbing(const at::Tensor & input, const at::Tensor & weight, const ::std::optional<at::Tensor> & bias, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, c10::SymIntArrayRef dilation, c10::SymInt groups) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  // Fast path: no tensor input is batched at this level.
  if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(bias, cur_level)) {
    return at::_ops::conv2d::call(input, weight, bias, stride, padding, dilation, groups);
  }
  auto [input_value, input_bdim] = unwrapTensorAtLevel(input, cur_level);
  auto [weight_value, weight_bdim] = unwrapTensorAtLevel(weight, cur_level);
  std::optional<Tensor> bias_value;
  std::optional<int64_t> bias_bdim;
  if (bias) {
      std::tie(bias_value, bias_bdim) = unwrapTensorAtLevel(bias.value(), cur_level);
  }
  auto results = batch_rule(input_value, input_bdim, weight_value, weight_bdim, bias_value, bias_bdim, stride, padding, dilation, groups);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
3084 template <typename batch_rule_t, batch_rule_t batch_rule>
3085 at::Tensor conv3d_generated_plumbing(const at::Tensor & input, const at::Tensor & weight, const ::std::optional<at::Tensor> & bias, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, c10::SymIntArrayRef dilation, c10::SymInt groups) {
3086   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
3087   auto maybe_layer = maybeCurrentDynamicLayer();
3088   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
3089   int64_t cur_level = maybe_layer->layerId();
3090   if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(bias, cur_level)) {
3091     return at::_ops::conv3d::call(input, weight, bias, stride, padding, dilation, groups);
3092   }
3093   auto [input_value, input_bdim] = unwrapTensorAtLevel(input, cur_level);
3094   auto [weight_value, weight_bdim] = unwrapTensorAtLevel(weight, cur_level);
3095   std::optional<Tensor> bias_value;
3096   std::optional<int64_t> bias_bdim;
3097   if (bias) {
3098       std::tie(bias_value, bias_bdim) = unwrapTensorAtLevel(bias.value(), cur_level);
3099   }
3100   auto results = batch_rule(input_value, input_bdim, weight_value, weight_bdim, bias_value, bias_bdim, stride, padding, dilation, groups);
3101   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
3102 }
3103 template <typename batch_rule_t, batch_rule_t batch_rule>
3104 at::Tensor conv1d_padding_generated_plumbing(const at::Tensor & input, const at::Tensor & weight, const ::std::optional<at::Tensor> & bias, c10::SymIntArrayRef stride, c10::string_view padding, c10::SymIntArrayRef dilation, c10::SymInt groups) {
3105   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
3106   auto maybe_layer = maybeCurrentDynamicLayer();
3107   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
3108   int64_t cur_level = maybe_layer->layerId();
3109   if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(bias, cur_level)) {
3110     return at::_ops::conv1d_padding::call(input, weight, bias, stride, padding, dilation, groups);
3111   }
3112   auto [input_value, input_bdim] = unwrapTensorAtLevel(input, cur_level);
3113   auto [weight_value, weight_bdim] = unwrapTensorAtLevel(weight, cur_level);
3114   std::optional<Tensor> bias_value;
3115   std::optional<int64_t> bias_bdim;
3116   if (bias) {
3117       std::tie(bias_value, bias_bdim) = unwrapTensorAtLevel(bias.value(), cur_level);
3118   }
3119   auto results = batch_rule(input_value, input_bdim, weight_value, weight_bdim, bias_value, bias_bdim, stride, padding, dilation, groups);
3120   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
3121 }
3122 template <typename batch_rule_t, batch_rule_t batch_rule>
3123 at::Tensor conv2d_padding_generated_plumbing(const at::Tensor & input, const at::Tensor & weight, const ::std::optional<at::Tensor> & bias, c10::SymIntArrayRef stride, c10::string_view padding, c10::SymIntArrayRef dilation, c10::SymInt groups) {
3124   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
3125   auto maybe_layer = maybeCurrentDynamicLayer();
3126   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
3127   int64_t cur_level = maybe_layer->layerId();
3128   if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(bias, cur_level)) {
3129     return at::_ops::conv2d_padding::call(input, weight, bias, stride, padding, dilation, groups);
3130   }
3131   auto [input_value, input_bdim] = unwrapTensorAtLevel(input, cur_level);
3132   auto [weight_value, weight_bdim] = unwrapTensorAtLevel(weight, cur_level);
3133   std::optional<Tensor> bias_value;
3134   std::optional<int64_t> bias_bdim;
3135   if (bias) {
3136       std::tie(bias_value, bias_bdim) = unwrapTensorAtLevel(bias.value(), cur_level);
3137   }
3138   auto results = batch_rule(input_value, input_bdim, weight_value, weight_bdim, bias_value, bias_bdim, stride, padding, dilation, groups);
3139   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
3140 }
3141 template <typename batch_rule_t, batch_rule_t batch_rule>
3142 at::Tensor conv3d_padding_generated_plumbing(const at::Tensor & input, const at::Tensor & weight, const ::std::optional<at::Tensor> & bias, c10::SymIntArrayRef stride, c10::string_view padding, c10::SymIntArrayRef dilation, c10::SymInt groups) {
3143   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
3144   auto maybe_layer = maybeCurrentDynamicLayer();
3145   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
3146   int64_t cur_level = maybe_layer->layerId();
3147   if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(bias, cur_level)) {
3148     return at::_ops::conv3d_padding::call(input, weight, bias, stride, padding, dilation, groups);
3149   }
3150   auto [input_value, input_bdim] = unwrapTensorAtLevel(input, cur_level);
3151   auto [weight_value, weight_bdim] = unwrapTensorAtLevel(weight, cur_level);
3152   std::optional<Tensor> bias_value;
3153   std::optional<int64_t> bias_bdim;
3154   if (bias) {
3155       std::tie(bias_value, bias_bdim) = unwrapTensorAtLevel(bias.value(), cur_level);
3156   }
3157   auto results = batch_rule(input_value, input_bdim, weight_value, weight_bdim, bias_value, bias_bdim, stride, padding, dilation, groups);
3158   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
3159 }
3160 template <typename batch_rule_t, batch_rule_t batch_rule>
3161 at::Tensor conv_tbc_generated_plumbing(const at::Tensor & self, const at::Tensor & weight, const at::Tensor & bias, int64_t pad) {
3162   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
3163   auto maybe_layer = maybeCurrentDynamicLayer();
3164   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
3165   int64_t cur_level = maybe_layer->layerId();
3166   if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(bias, cur_level)) {
3167     return at::_ops::conv_tbc::call(self, weight, bias, pad);
3168   }
3169   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
3170   auto [weight_value, weight_bdim] = unwrapTensorAtLevel(weight, cur_level);
3171   auto [bias_value, bias_bdim] = unwrapTensorAtLevel(bias, cur_level);
3172   auto results = batch_rule(self_value, self_bdim, weight_value, weight_bdim, bias_value, bias_bdim, pad);
3173   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
3174 }
3175 template <typename batch_rule_t, batch_rule_t batch_rule>
3176 ::std::tuple<at::Tensor,at::Tensor,at::Tensor> conv_tbc_backward_generated_plumbing(const at::Tensor & self, const at::Tensor & input, const at::Tensor & weight, const at::Tensor & bias, int64_t pad) {
3177   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
3178   auto maybe_layer = maybeCurrentDynamicLayer();
3179   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
3180   int64_t cur_level = maybe_layer->layerId();
3181   if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(bias, cur_level)) {
3182     return at::_ops::conv_tbc_backward::call(self, input, weight, bias, pad);
3183   }
3184   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
3185   auto [input_value, input_bdim] = unwrapTensorAtLevel(input, cur_level);
3186   auto [weight_value, weight_bdim] = unwrapTensorAtLevel(weight, cur_level);
3187   auto [bias_value, bias_bdim] = unwrapTensorAtLevel(bias, cur_level);
3188   auto results = batch_rule(self_value, self_bdim, input_value, input_bdim, weight_value, weight_bdim, bias_value, bias_bdim, pad);
3189   return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level));
3190 }
3191 template <typename batch_rule_t, batch_rule_t batch_rule>
3192 at::Tensor conv_transpose1d_generated_plumbing(const at::Tensor & input, const at::Tensor & weight, const ::std::optional<at::Tensor> & bias, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, c10::SymIntArrayRef output_padding, c10::SymInt groups, c10::SymIntArrayRef dilation) {
3193   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
3194   auto maybe_layer = maybeCurrentDynamicLayer();
3195   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
3196   int64_t cur_level = maybe_layer->layerId();
3197   if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(bias, cur_level)) {
3198     return at::_ops::conv_transpose1d::call(input, weight, bias, stride, padding, output_padding, groups, dilation);
3199   }
3200   auto [input_value, input_bdim] = unwrapTensorAtLevel(input, cur_level);
3201   auto [weight_value, weight_bdim] = unwrapTensorAtLevel(weight, cur_level);
3202   std::optional<Tensor> bias_value;
3203   std::optional<int64_t> bias_bdim;
3204   if (bias) {
3205       std::tie(bias_value, bias_bdim) = unwrapTensorAtLevel(bias.value(), cur_level);
3206   }
3207   auto results = batch_rule(input_value, input_bdim, weight_value, weight_bdim, bias_value, bias_bdim, stride, padding, output_padding, groups, dilation);
3208   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
3209 }
3210 template <typename batch_rule_t, batch_rule_t batch_rule>
3211 at::Tensor conv_transpose2d_input_generated_plumbing(const at::Tensor & input, const at::Tensor & weight, const ::std::optional<at::Tensor> & bias, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, c10::SymIntArrayRef output_padding, c10::SymInt groups, c10::SymIntArrayRef dilation) {
3212   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
3213   auto maybe_layer = maybeCurrentDynamicLayer();
3214   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
3215   int64_t cur_level = maybe_layer->layerId();
3216   if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(bias, cur_level)) {
3217     return at::_ops::conv_transpose2d_input::call(input, weight, bias, stride, padding, output_padding, groups, dilation);
3218   }
3219   auto [input_value, input_bdim] = unwrapTensorAtLevel(input, cur_level);
3220   auto [weight_value, weight_bdim] = unwrapTensorAtLevel(weight, cur_level);
3221   std::optional<Tensor> bias_value;
3222   std::optional<int64_t> bias_bdim;
3223   if (bias) {
3224       std::tie(bias_value, bias_bdim) = unwrapTensorAtLevel(bias.value(), cur_level);
3225   }
3226   auto results = batch_rule(input_value, input_bdim, weight_value, weight_bdim, bias_value, bias_bdim, stride, padding, output_padding, groups, dilation);
3227   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
3228 }
3229 template <typename batch_rule_t, batch_rule_t batch_rule>
3230 at::Tensor conv_transpose3d_input_generated_plumbing(const at::Tensor & input, const at::Tensor & weight, const ::std::optional<at::Tensor> & bias, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, c10::SymIntArrayRef output_padding, c10::SymInt groups, c10::SymIntArrayRef dilation) {
3231   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
3232   auto maybe_layer = maybeCurrentDynamicLayer();
3233   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
3234   int64_t cur_level = maybe_layer->layerId();
3235   if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(bias, cur_level)) {
3236     return at::_ops::conv_transpose3d_input::call(input, weight, bias, stride, padding, output_padding, groups, dilation);
3237   }
3238   auto [input_value, input_bdim] = unwrapTensorAtLevel(input, cur_level);
3239   auto [weight_value, weight_bdim] = unwrapTensorAtLevel(weight, cur_level);
3240   std::optional<Tensor> bias_value;
3241   std::optional<int64_t> bias_bdim;
3242   if (bias) {
3243       std::tie(bias_value, bias_bdim) = unwrapTensorAtLevel(bias.value(), cur_level);
3244   }
3245   auto results = batch_rule(input_value, input_bdim, weight_value, weight_bdim, bias_value, bias_bdim, stride, padding, output_padding, groups, dilation);
3246   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
3247 }
3248 template <typename batch_rule_t, batch_rule_t batch_rule>
3249 at::Tensor copy_generated_plumbing(const at::Tensor & self, const at::Tensor & src, bool non_blocking) {
3250   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
3251   auto maybe_layer = maybeCurrentDynamicLayer();
3252   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
3253   int64_t cur_level = maybe_layer->layerId();
3254   if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(src, cur_level)) {
3255     return at::_ops::copy::call(self, src, non_blocking);
3256   }
3257   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
3258   auto [src_value, src_bdim] = unwrapTensorAtLevel(src, cur_level);
3259   auto results = batch_rule(self_value, self_bdim, src_value, src_bdim, non_blocking);
3260   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
3261 }
3262 template <typename batch_rule_t, batch_rule_t batch_rule>
3263 at::Tensor & copy__generated_plumbing(at::Tensor & self, const at::Tensor & src, bool non_blocking) {
3264   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
3265   auto maybe_layer = maybeCurrentDynamicLayer();
3266   vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
3267   int64_t cur_level = maybe_layer->layerId();
3268   if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(src, cur_level)) {
3269     return at::_ops::copy_::call(self, src, non_blocking);
3270   }
3271   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
3272   auto [src_value, src_bdim] = unwrapTensorAtLevel(src, cur_level);
3273   batch_rule(self_value, self_bdim, src_value, src_bdim, non_blocking);
3274   return self;
3275 }
3276 template <typename batch_rule_t, batch_rule_t batch_rule>
3277 at::Tensor _copy_from_generated_plumbing(const at::Tensor & self, const at::Tensor & dst, bool non_blocking) {
3278   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
3279   auto maybe_layer = maybeCurrentDynamicLayer();
3280   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
3281   int64_t cur_level = maybe_layer->layerId();
3282   if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(dst, cur_level)) {
3283     return at::_ops::_copy_from::call(self, dst, non_blocking);
3284   }
3285   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
3286   auto [dst_value, dst_bdim] = unwrapTensorAtLevel(dst, cur_level);
3287   auto results = batch_rule(self_value, self_bdim, dst_value, dst_bdim, non_blocking);
3288   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
3289 }
3290 template <typename batch_rule_t, batch_rule_t batch_rule>
3291 at::Tensor _copy_from_and_resize_generated_plumbing(const at::Tensor & self, const at::Tensor & dst) {
3292   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
3293   auto maybe_layer = maybeCurrentDynamicLayer();
3294   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
3295   int64_t cur_level = maybe_layer->layerId();
3296   if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(dst, cur_level)) {
3297     return at::_ops::_copy_from_and_resize::call(self, dst);
3298   }
3299   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
3300   auto [dst_value, dst_bdim] = unwrapTensorAtLevel(dst, cur_level);
3301   auto results = batch_rule(self_value, self_bdim, dst_value, dst_bdim);
3302   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
3303 }
3304 template <typename batch_rule_t, batch_rule_t batch_rule>
3305 at::Tensor cos_generated_plumbing(const at::Tensor & self) {
3306   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
3307   auto maybe_layer = maybeCurrentDynamicLayer();
3308   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
3309   int64_t cur_level = maybe_layer->layerId();
3310   if (!isBatchedAtLevel(self, cur_level)) {
3311     return at::_ops::cos::call(self);
3312   }
3313   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
3314   auto results = batch_rule(self_value, self_bdim);
3315   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
3316 }
3317 template <typename batch_rule_t, batch_rule_t batch_rule>
3318 at::Tensor & cos__generated_plumbing(at::Tensor & self) {
3319   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
3320   auto maybe_layer = maybeCurrentDynamicLayer();
3321   vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
3322   int64_t cur_level = maybe_layer->layerId();
3323   if (!isBatchedAtLevel(self, cur_level)) {
3324     return at::_ops::cos_::call(self);
3325   }
3326   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
3327   batch_rule(self_value, self_bdim);
3328   return self;
3329 }
3330 template <typename batch_rule_t, batch_rule_t batch_rule>
3331 at::Tensor cosh_generated_plumbing(const at::Tensor & self) {
3332   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
3333   auto maybe_layer = maybeCurrentDynamicLayer();
3334   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
3335   int64_t cur_level = maybe_layer->layerId();
3336   if (!isBatchedAtLevel(self, cur_level)) {
3337     return at::_ops::cosh::call(self);
3338   }
3339   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
3340   auto results = batch_rule(self_value, self_bdim);
3341   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
3342 }
3343 template <typename batch_rule_t, batch_rule_t batch_rule>
3344 at::Tensor & cosh__generated_plumbing(at::Tensor & self) {
3345   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
3346   auto maybe_layer = maybeCurrentDynamicLayer();
3347   vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
3348   int64_t cur_level = maybe_layer->layerId();
3349   if (!isBatchedAtLevel(self, cur_level)) {
3350     return at::_ops::cosh_::call(self);
3351   }
3352   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
3353   batch_rule(self_value, self_bdim);
3354   return self;
3355 }
3356 template <typename batch_rule_t, batch_rule_t batch_rule>
3357 at::Tensor cosine_embedding_loss_generated_plumbing(const at::Tensor & input1, const at::Tensor & input2, const at::Tensor & target, double margin, int64_t reduction) {
3358   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
3359   auto maybe_layer = maybeCurrentDynamicLayer();
3360   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
3361   int64_t cur_level = maybe_layer->layerId();
3362   if (!isBatchedAtLevel(input1, cur_level) && !isBatchedAtLevel(input2, cur_level) && !isBatchedAtLevel(target, cur_level)) {
3363     return at::_ops::cosine_embedding_loss::call(input1, input2, target, margin, reduction);
3364   }
3365   auto [input1_value, input1_bdim] = unwrapTensorAtLevel(input1, cur_level);
3366   auto [input2_value, input2_bdim] = unwrapTensorAtLevel(input2, cur_level);
3367   auto [target_value, target_bdim] = unwrapTensorAtLevel(target, cur_level);
3368   auto results = batch_rule(input1_value, input1_bdim, input2_value, input2_bdim, target_value, target_bdim, margin, reduction);
3369   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
3370 }
3371 template <typename batch_rule_t, batch_rule_t batch_rule>
3372 at::Tensor count_nonzero_dim_IntList_generated_plumbing(const at::Tensor & self, at::IntArrayRef dim) {
3373   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
3374   auto maybe_layer = maybeCurrentDynamicLayer();
3375   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
3376   int64_t cur_level = maybe_layer->layerId();
3377   if (!isBatchedAtLevel(self, cur_level)) {
3378     return at::_ops::count_nonzero_dim_IntList::call(self, dim);
3379   }
3380   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
3381   auto results = batch_rule(self_value, self_bdim, dim);
3382   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
3383 }
3384 template <typename batch_rule_t, batch_rule_t batch_rule>
3385 at::Tensor count_nonzero_generated_plumbing(const at::Tensor & self, ::std::optional<int64_t> dim) {
3386   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
3387   auto maybe_layer = maybeCurrentDynamicLayer();
3388   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
3389   int64_t cur_level = maybe_layer->layerId();
3390   if (!isBatchedAtLevel(self, cur_level)) {
3391     return at::_ops::count_nonzero::call(self, dim);
3392   }
3393   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
3394   auto results = batch_rule(self_value, self_bdim, dim);
3395   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
3396 }
3397 template <typename batch_rule_t, batch_rule_t batch_rule>
3398 at::Tensor cov_generated_plumbing(const at::Tensor & self, int64_t correction, const ::std::optional<at::Tensor> & fweights, const ::std::optional<at::Tensor> & aweights) {
3399   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
3400   auto maybe_layer = maybeCurrentDynamicLayer();
3401   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
3402   int64_t cur_level = maybe_layer->layerId();
3403   if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(fweights, cur_level) && !isBatchedAtLevel(aweights, cur_level)) {
3404     return at::_ops::cov::call(self, correction, fweights, aweights);
3405   }
3406   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
3407   std::optional<Tensor> fweights_value;
3408   std::optional<int64_t> fweights_bdim;
3409   if (fweights) {
3410       std::tie(fweights_value, fweights_bdim) = unwrapTensorAtLevel(fweights.value(), cur_level);
3411   }
3412   std::optional<Tensor> aweights_value;
3413   std::optional<int64_t> aweights_bdim;
3414   if (aweights) {
3415       std::tie(aweights_value, aweights_bdim) = unwrapTensorAtLevel(aweights.value(), cur_level);
3416   }
3417   auto results = batch_rule(self_value, self_bdim, correction, fweights_value, fweights_bdim, aweights_value, aweights_bdim);
3418   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
3419 }
3420 template <typename batch_rule_t, batch_rule_t batch_rule>
3421 at::Tensor corrcoef_generated_plumbing(const at::Tensor & self) {
3422   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
3423   auto maybe_layer = maybeCurrentDynamicLayer();
3424   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
3425   int64_t cur_level = maybe_layer->layerId();
3426   if (!isBatchedAtLevel(self, cur_level)) {
3427     return at::_ops::corrcoef::call(self);
3428   }
3429   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
3430   auto results = batch_rule(self_value, self_bdim);
3431   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
3432 }
3433 template <typename batch_rule_t, batch_rule_t batch_rule>
3434 at::Tensor cudnn_affine_grid_generator_generated_plumbing(const at::Tensor & theta, int64_t N, int64_t C, int64_t H, int64_t W) {
3435   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
3436   auto maybe_layer = maybeCurrentDynamicLayer();
3437   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
3438   int64_t cur_level = maybe_layer->layerId();
3439   if (!isBatchedAtLevel(theta, cur_level)) {
3440     return at::_ops::cudnn_affine_grid_generator::call(theta, N, C, H, W);
3441   }
3442   auto [theta_value, theta_bdim] = unwrapTensorAtLevel(theta, cur_level);
3443   auto results = batch_rule(theta_value, theta_bdim, N, C, H, W);
3444   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
3445 }
3446 template <typename batch_rule_t, batch_rule_t batch_rule>
3447 at::Tensor cudnn_affine_grid_generator_backward_generated_plumbing(const at::Tensor & grad, int64_t N, int64_t C, int64_t H, int64_t W) {
3448   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
3449   auto maybe_layer = maybeCurrentDynamicLayer();
3450   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
3451   int64_t cur_level = maybe_layer->layerId();
3452   if (!isBatchedAtLevel(grad, cur_level)) {
3453     return at::_ops::cudnn_affine_grid_generator_backward::call(grad, N, C, H, W);
3454   }
3455   auto [grad_value, grad_bdim] = unwrapTensorAtLevel(grad, cur_level);
3456   auto results = batch_rule(grad_value, grad_bdim, N, C, H, W);
3457   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
3458 }
3459 template <typename batch_rule_t, batch_rule_t batch_rule>
3460 ::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor> cudnn_batch_norm_generated_plumbing(const at::Tensor & input, const at::Tensor & weight, const ::std::optional<at::Tensor> & bias, const ::std::optional<at::Tensor> & running_mean, const ::std::optional<at::Tensor> & running_var, bool training, double exponential_average_factor, double epsilon) {
3461   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
3462   auto maybe_layer = maybeCurrentDynamicLayer();
3463   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
3464   int64_t cur_level = maybe_layer->layerId();
3465   if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(bias, cur_level) && !isBatchedAtLevel(running_mean, cur_level) && !isBatchedAtLevel(running_var, cur_level)) {
3466     return at::_ops::cudnn_batch_norm::call(input, weight, bias, running_mean, running_var, training, exponential_average_factor, epsilon);
3467   }
3468   auto [input_value, input_bdim] = unwrapTensorAtLevel(input, cur_level);
3469   auto [weight_value, weight_bdim] = unwrapTensorAtLevel(weight, cur_level);
3470   std::optional<Tensor> bias_value;
3471   std::optional<int64_t> bias_bdim;
3472   if (bias) {
3473       std::tie(bias_value, bias_bdim) = unwrapTensorAtLevel(bias.value(), cur_level);
3474   }
3475   std::optional<Tensor> running_mean_value;
3476   std::optional<int64_t> running_mean_bdim;
3477   if (running_mean) {
3478       std::tie(running_mean_value, running_mean_bdim) = unwrapTensorAtLevel(running_mean.value(), cur_level);
3479   }
3480   std::optional<Tensor> running_var_value;
3481   std::optional<int64_t> running_var_bdim;
3482   if (running_var) {
3483       std::tie(running_var_value, running_var_bdim) = unwrapTensorAtLevel(running_var.value(), cur_level);
3484   }
3485   auto results = batch_rule(input_value, input_bdim, weight_value, weight_bdim, bias_value, bias_bdim, running_mean_value, running_mean_bdim, running_var_value, running_var_bdim, training, exponential_average_factor, epsilon);
3486   return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level), makeBatched(std::get<6>(results), std::get<7>(results), cur_level));
3487 }
3488 template <typename batch_rule_t, batch_rule_t batch_rule>
3489 ::std::tuple<at::Tensor,at::Tensor,at::Tensor> cudnn_batch_norm_backward_generated_plumbing(const at::Tensor & input, const at::Tensor & grad_output, const at::Tensor & weight, const ::std::optional<at::Tensor> & running_mean, const ::std::optional<at::Tensor> & running_var, const ::std::optional<at::Tensor> & save_mean, const ::std::optional<at::Tensor> & save_var, double epsilon, const at::Tensor & reserveSpace) {
3490   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
3491   auto maybe_layer = maybeCurrentDynamicLayer();
3492   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
3493   int64_t cur_level = maybe_layer->layerId();
3494   if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(running_mean, cur_level) && !isBatchedAtLevel(running_var, cur_level) && !isBatchedAtLevel(save_mean, cur_level) && !isBatchedAtLevel(save_var, cur_level) && !isBatchedAtLevel(reserveSpace, cur_level)) {
3495     return at::_ops::cudnn_batch_norm_backward::call(input, grad_output, weight, running_mean, running_var, save_mean, save_var, epsilon, reserveSpace);
3496   }
3497   auto [input_value, input_bdim] = unwrapTensorAtLevel(input, cur_level);
3498   auto [grad_output_value, grad_output_bdim] = unwrapTensorAtLevel(grad_output, cur_level);
3499   auto [weight_value, weight_bdim] = unwrapTensorAtLevel(weight, cur_level);
3500   auto [reserveSpace_value, reserveSpace_bdim] = unwrapTensorAtLevel(reserveSpace, cur_level);
3501   std::optional<Tensor> running_mean_value;
3502   std::optional<int64_t> running_mean_bdim;
3503   if (running_mean) {
3504       std::tie(running_mean_value, running_mean_bdim) = unwrapTensorAtLevel(running_mean.value(), cur_level);
3505   }
3506   std::optional<Tensor> running_var_value;
3507   std::optional<int64_t> running_var_bdim;
3508   if (running_var) {
3509       std::tie(running_var_value, running_var_bdim) = unwrapTensorAtLevel(running_var.value(), cur_level);
3510   }
3511   std::optional<Tensor> save_mean_value;
3512   std::optional<int64_t> save_mean_bdim;
3513   if (save_mean) {
3514       std::tie(save_mean_value, save_mean_bdim) = unwrapTensorAtLevel(save_mean.value(), cur_level);
3515   }
3516   std::optional<Tensor> save_var_value;
3517   std::optional<int64_t> save_var_bdim;
3518   if (save_var) {
3519       std::tie(save_var_value, save_var_bdim) = unwrapTensorAtLevel(save_var.value(), cur_level);
3520   }
3521   auto results = batch_rule(input_value, input_bdim, grad_output_value, grad_output_bdim, weight_value, weight_bdim, running_mean_value, running_mean_bdim, running_var_value, running_var_bdim, save_mean_value, save_mean_bdim, save_var_value, save_var_bdim, epsilon, reserveSpace_value, reserveSpace_bdim);
3522   return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level));
3523 }
// Generated vmap plumbing for aten::cudnn_convolution: unwrap inputs batched
// at the current functorch level, apply batch_rule, re-wrap the result.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor cudnn_convolution_generated_plumbing(const at::Tensor & self, const at::Tensor & weight, c10::SymIntArrayRef padding, c10::SymIntArrayRef stride, c10::SymIntArrayRef dilation, c10::SymInt groups, bool benchmark, bool deterministic, bool allow_tf32) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  // Fast path: nothing is batched at this level, defer to the plain op.
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(weight, cur_level)) {
    return at::_ops::cudnn_convolution::call(self, weight, padding, stride, dilation, groups, benchmark, deterministic, allow_tf32);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto [weight_value, weight_bdim] = unwrapTensorAtLevel(weight, cur_level);
  auto results = batch_rule(self_value, self_bdim, weight_value, weight_bdim, padding, stride, dilation, groups, benchmark, deterministic, allow_tf32);
  // results = (tensor, optional bdim); re-wrap as a BatchedTensor at cur_level.
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// Generated vmap plumbing for aten::cudnn_convolution_transpose.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor cudnn_convolution_transpose_generated_plumbing(const at::Tensor & self, const at::Tensor & weight, c10::SymIntArrayRef padding, c10::SymIntArrayRef output_padding, c10::SymIntArrayRef stride, c10::SymIntArrayRef dilation, c10::SymInt groups, bool benchmark, bool deterministic, bool allow_tf32) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  // No input batched at this level -> call the plain op directly.
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(weight, cur_level)) {
    return at::_ops::cudnn_convolution_transpose::call(self, weight, padding, output_padding, stride, dilation, groups, benchmark, deterministic, allow_tf32);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto [weight_value, weight_bdim] = unwrapTensorAtLevel(weight, cur_level);
  auto results = batch_rule(self_value, self_bdim, weight_value, weight_bdim, padding, output_padding, stride, dilation, groups, benchmark, deterministic, allow_tf32);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// Generated vmap plumbing for aten::_mps_convolution_transpose.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor _mps_convolution_transpose_generated_plumbing(const at::Tensor & self, const at::Tensor & weight, c10::SymIntArrayRef padding, c10::SymIntArrayRef output_padding, c10::SymIntArrayRef stride, c10::SymIntArrayRef dilation, c10::SymInt groups) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  // No input batched at this level -> call the plain op directly.
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(weight, cur_level)) {
    return at::_ops::_mps_convolution_transpose::call(self, weight, padding, output_padding, stride, dilation, groups);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto [weight_value, weight_bdim] = unwrapTensorAtLevel(weight, cur_level);
  auto results = batch_rule(self_value, self_bdim, weight_value, weight_bdim, padding, output_padding, stride, dilation, groups);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// Generated vmap plumbing for aten::mps_convolution_transpose_backward.
// Returns two tensors; each (value, bdim) pair from batch_rule is re-wrapped.
template <typename batch_rule_t, batch_rule_t batch_rule>
::std::tuple<at::Tensor,at::Tensor> mps_convolution_transpose_backward_generated_plumbing(const at::Tensor & self, const at::Tensor & grad_output, const at::Tensor & weight, c10::SymIntArrayRef padding, c10::SymIntArrayRef output_padding, c10::SymIntArrayRef stride, c10::SymIntArrayRef dilation, c10::SymInt groups, ::std::array<bool,2> output_mask) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  // No input batched at this level -> call the plain op directly.
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(weight, cur_level)) {
    return at::_ops::mps_convolution_transpose_backward::call(self, grad_output, weight, padding, output_padding, stride, dilation, groups, output_mask);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto [grad_output_value, grad_output_bdim] = unwrapTensorAtLevel(grad_output, cur_level);
  auto [weight_value, weight_bdim] = unwrapTensorAtLevel(weight, cur_level);
  auto results = batch_rule(self_value, self_bdim, grad_output_value, grad_output_bdim, weight_value, weight_bdim, padding, output_padding, stride, dilation, groups, output_mask);
  return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
}
// Generated vmap plumbing for aten::cudnn_convolution_relu.
// `bias` is optional: it is unwrapped only when present.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor cudnn_convolution_relu_generated_plumbing(const at::Tensor & self, const at::Tensor & weight, const ::std::optional<at::Tensor> & bias, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, c10::SymIntArrayRef dilation, c10::SymInt groups) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  // No input batched at this level -> call the plain op directly.
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(bias, cur_level)) {
    return at::_ops::cudnn_convolution_relu::call(self, weight, bias, stride, padding, dilation, groups);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto [weight_value, weight_bdim] = unwrapTensorAtLevel(weight, cur_level);
  // Optional tensor: value/bdim stay nullopt when bias is absent.
  std::optional<Tensor> bias_value;
  std::optional<int64_t> bias_bdim;
  if (bias) {
      std::tie(bias_value, bias_bdim) = unwrapTensorAtLevel(bias.value(), cur_level);
  }
  auto results = batch_rule(self_value, self_bdim, weight_value, weight_bdim, bias_value, bias_bdim, stride, padding, dilation, groups);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// Generated vmap plumbing for aten::cudnn_convolution_add_relu.
// `bias` is optional and only unwrapped when present; `alpha` is a scalar.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor cudnn_convolution_add_relu_generated_plumbing(const at::Tensor & self, const at::Tensor & weight, const at::Tensor & z, const ::std::optional<at::Scalar> & alpha, const ::std::optional<at::Tensor> & bias, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, c10::SymIntArrayRef dilation, c10::SymInt groups) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  // No input batched at this level -> call the plain op directly.
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(z, cur_level) && !isBatchedAtLevel(bias, cur_level)) {
    return at::_ops::cudnn_convolution_add_relu::call(self, weight, z, alpha, bias, stride, padding, dilation, groups);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto [weight_value, weight_bdim] = unwrapTensorAtLevel(weight, cur_level);
  auto [z_value, z_bdim] = unwrapTensorAtLevel(z, cur_level);
  // Optional tensor: value/bdim stay nullopt when bias is absent.
  std::optional<Tensor> bias_value;
  std::optional<int64_t> bias_bdim;
  if (bias) {
      std::tie(bias_value, bias_bdim) = unwrapTensorAtLevel(bias.value(), cur_level);
  }
  auto results = batch_rule(self_value, self_bdim, weight_value, weight_bdim, z_value, z_bdim, alpha, bias_value, bias_bdim, stride, padding, dilation, groups);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// Generated vmap plumbing for aten::cudnn_grid_sampler.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor cudnn_grid_sampler_generated_plumbing(const at::Tensor & self, const at::Tensor & grid) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  // No input batched at this level -> call the plain op directly.
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(grid, cur_level)) {
    return at::_ops::cudnn_grid_sampler::call(self, grid);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto [grid_value, grid_bdim] = unwrapTensorAtLevel(grid, cur_level);
  auto results = batch_rule(self_value, self_bdim, grid_value, grid_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// Generated vmap plumbing for aten::cudnn_grid_sampler_backward (two outputs).
template <typename batch_rule_t, batch_rule_t batch_rule>
::std::tuple<at::Tensor,at::Tensor> cudnn_grid_sampler_backward_generated_plumbing(const at::Tensor & self, const at::Tensor & grid, const at::Tensor & grad_output) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  // No input batched at this level -> call the plain op directly.
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(grid, cur_level) && !isBatchedAtLevel(grad_output, cur_level)) {
    return at::_ops::cudnn_grid_sampler_backward::call(self, grid, grad_output);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto [grid_value, grid_bdim] = unwrapTensorAtLevel(grid, cur_level);
  auto [grad_output_value, grad_output_bdim] = unwrapTensorAtLevel(grad_output, cur_level);
  auto results = batch_rule(self_value, self_bdim, grid_value, grid_bdim, grad_output_value, grad_output_bdim);
  return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
}
// Generated vmap plumbing for aten::cummax (returns values and indices).
template <typename batch_rule_t, batch_rule_t batch_rule>
::std::tuple<at::Tensor,at::Tensor> cummax_generated_plumbing(const at::Tensor & self, int64_t dim) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  // No input batched at this level -> call the plain op directly.
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::cummax::call(self, dim);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, dim);
  return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
}
// Generated vmap plumbing for aten::cummax.dimname (named-dimension overload).
template <typename batch_rule_t, batch_rule_t batch_rule>
::std::tuple<at::Tensor,at::Tensor> cummax_dimname_generated_plumbing(const at::Tensor & self, at::Dimname dim) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  // No input batched at this level -> call the plain op directly.
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::cummax_dimname::call(self, dim);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, dim);
  return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
}
// Generated vmap plumbing for aten::_cummax_helper. Void op: batch_rule is
// invoked for its side effects on values/indices; nothing is re-wrapped
// (note the "no_returns" escape-check variant).
template <typename batch_rule_t, batch_rule_t batch_rule>
void _cummax_helper_generated_plumbing(const at::Tensor & self, at::Tensor & values, at::Tensor & indices, int64_t dim) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
  int64_t cur_level = maybe_layer->layerId();
  // No input batched at this level -> call the plain op directly.
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(values, cur_level) && !isBatchedAtLevel(indices, cur_level)) {
    return at::_ops::_cummax_helper::call(self, values, indices, dim);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto [values_value, values_bdim] = unwrapTensorAtLevel(values, cur_level);
  auto [indices_value, indices_bdim] = unwrapTensorAtLevel(indices, cur_level);
  batch_rule(self_value, self_bdim, values_value, values_bdim, indices_value, indices_bdim, dim);
}
// Generated vmap plumbing for aten::cummin (returns values and indices).
template <typename batch_rule_t, batch_rule_t batch_rule>
::std::tuple<at::Tensor,at::Tensor> cummin_generated_plumbing(const at::Tensor & self, int64_t dim) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  // No input batched at this level -> call the plain op directly.
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::cummin::call(self, dim);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, dim);
  return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
}
// Generated vmap plumbing for aten::cummin.dimname (named-dimension overload).
template <typename batch_rule_t, batch_rule_t batch_rule>
::std::tuple<at::Tensor,at::Tensor> cummin_dimname_generated_plumbing(const at::Tensor & self, at::Dimname dim) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  // No input batched at this level -> call the plain op directly.
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::cummin_dimname::call(self, dim);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, dim);
  return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
}
// Generated vmap plumbing for aten::_cummin_helper. Void op: batch_rule is
// invoked for its side effects on values/indices; nothing is re-wrapped.
template <typename batch_rule_t, batch_rule_t batch_rule>
void _cummin_helper_generated_plumbing(const at::Tensor & self, at::Tensor & values, at::Tensor & indices, int64_t dim) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
  int64_t cur_level = maybe_layer->layerId();
  // No input batched at this level -> call the plain op directly.
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(values, cur_level) && !isBatchedAtLevel(indices, cur_level)) {
    return at::_ops::_cummin_helper::call(self, values, indices, dim);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto [values_value, values_bdim] = unwrapTensorAtLevel(values, cur_level);
  auto [indices_value, indices_bdim] = unwrapTensorAtLevel(indices, cur_level);
  batch_rule(self_value, self_bdim, values_value, values_bdim, indices_value, indices_bdim, dim);
}
// Generated vmap plumbing for aten::cummaxmin_backward.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor cummaxmin_backward_generated_plumbing(const at::Tensor & grad, const at::Tensor & input, const at::Tensor & indices, int64_t dim) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  // No input batched at this level -> call the plain op directly.
  if (!isBatchedAtLevel(grad, cur_level) && !isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(indices, cur_level)) {
    return at::_ops::cummaxmin_backward::call(grad, input, indices, dim);
  }
  auto [grad_value, grad_bdim] = unwrapTensorAtLevel(grad, cur_level);
  auto [input_value, input_bdim] = unwrapTensorAtLevel(input, cur_level);
  auto [indices_value, indices_bdim] = unwrapTensorAtLevel(indices, cur_level);
  auto results = batch_rule(grad_value, grad_bdim, input_value, input_bdim, indices_value, indices_bdim, dim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// Generated vmap plumbing for aten::cumprod.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor cumprod_generated_plumbing(const at::Tensor & self, int64_t dim, ::std::optional<at::ScalarType> dtype) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  // No input batched at this level -> call the plain op directly.
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::cumprod::call(self, dim, dtype);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, dim, dtype);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// Generated vmap plumbing for aten::cumprod_ (in-place). batch_rule mutates
// the unwrapped value; the original `self` reference is returned unchanged.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor & cumprod__generated_plumbing(at::Tensor & self, int64_t dim, ::std::optional<at::ScalarType> dtype) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  // No input batched at this level -> call the plain op directly.
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::cumprod_::call(self, dim, dtype);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  batch_rule(self_value, self_bdim, dim, dtype);
  return self;
}
// Generated vmap plumbing for aten::cumprod.dimname (named-dimension overload).
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor cumprod_dimname_generated_plumbing(const at::Tensor & self, at::Dimname dim, ::std::optional<at::ScalarType> dtype) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  // No input batched at this level -> call the plain op directly.
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::cumprod_dimname::call(self, dim, dtype);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, dim, dtype);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// Generated vmap plumbing for aten::cumprod_.dimname (in-place, named dim).
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor & cumprod__dimname_generated_plumbing(at::Tensor & self, at::Dimname dim, ::std::optional<at::ScalarType> dtype) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  // No input batched at this level -> call the plain op directly.
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::cumprod__dimname::call(self, dim, dtype);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  batch_rule(self_value, self_bdim, dim, dtype);
  return self;
}
// Generated vmap plumbing for aten::cumprod_backward.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor cumprod_backward_generated_plumbing(const at::Tensor & grad, const at::Tensor & input, int64_t dim, const at::Tensor & output) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  // No input batched at this level -> call the plain op directly.
  if (!isBatchedAtLevel(grad, cur_level) && !isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(output, cur_level)) {
    return at::_ops::cumprod_backward::call(grad, input, dim, output);
  }
  auto [grad_value, grad_bdim] = unwrapTensorAtLevel(grad, cur_level);
  auto [input_value, input_bdim] = unwrapTensorAtLevel(input, cur_level);
  auto [output_value, output_bdim] = unwrapTensorAtLevel(output, cur_level);
  auto results = batch_rule(grad_value, grad_bdim, input_value, input_bdim, dim, output_value, output_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// Generated vmap plumbing for aten::cumsum.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor cumsum_generated_plumbing(const at::Tensor & self, int64_t dim, ::std::optional<at::ScalarType> dtype) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  // No input batched at this level -> call the plain op directly.
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::cumsum::call(self, dim, dtype);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, dim, dtype);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// Generated vmap plumbing for aten::cumsum_ (in-place). batch_rule mutates
// the unwrapped value; the original `self` reference is returned unchanged.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor & cumsum__generated_plumbing(at::Tensor & self, int64_t dim, ::std::optional<at::ScalarType> dtype) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  // No input batched at this level -> call the plain op directly.
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::cumsum_::call(self, dim, dtype);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  batch_rule(self_value, self_bdim, dim, dtype);
  return self;
}
// Generated vmap plumbing for aten::cumsum.dimname (named-dimension overload).
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor cumsum_dimname_generated_plumbing(const at::Tensor & self, at::Dimname dim, ::std::optional<at::ScalarType> dtype) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  // No input batched at this level -> call the plain op directly.
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::cumsum_dimname::call(self, dim, dtype);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, dim, dtype);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// Generated vmap plumbing for aten::cumsum_.dimname (in-place, named dim).
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor & cumsum__dimname_generated_plumbing(at::Tensor & self, at::Dimname dim, ::std::optional<at::ScalarType> dtype) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  // No input batched at this level -> call the plain op directly.
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::cumsum__dimname::call(self, dim, dtype);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  batch_rule(self_value, self_bdim, dim, dtype);
  return self;
}
// Generated vmap plumbing for aten::cumulative_trapezoid.x.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor cumulative_trapezoid_x_generated_plumbing(const at::Tensor & y, const at::Tensor & x, int64_t dim) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  // No input batched at this level -> call the plain op directly.
  if (!isBatchedAtLevel(y, cur_level) && !isBatchedAtLevel(x, cur_level)) {
    return at::_ops::cumulative_trapezoid_x::call(y, x, dim);
  }
  auto [y_value, y_bdim] = unwrapTensorAtLevel(y, cur_level);
  auto [x_value, x_bdim] = unwrapTensorAtLevel(x, cur_level);
  auto results = batch_rule(y_value, y_bdim, x_value, x_bdim, dim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// Generated vmap plumbing for aten::cumulative_trapezoid.dx (`dx` is a scalar
// spacing, so only `y` can be batched).
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor cumulative_trapezoid_dx_generated_plumbing(const at::Tensor & y, const at::Scalar & dx, int64_t dim) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  // No input batched at this level -> call the plain op directly.
  if (!isBatchedAtLevel(y, cur_level)) {
    return at::_ops::cumulative_trapezoid_dx::call(y, dx, dim);
  }
  auto [y_value, y_bdim] = unwrapTensorAtLevel(y, cur_level);
  auto results = batch_rule(y_value, y_bdim, dx, dim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// Generated vmap plumbing for aten::ctc_loss.IntList (lengths as int arrays).
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor ctc_loss_IntList_generated_plumbing(const at::Tensor & log_probs, const at::Tensor & targets, at::IntArrayRef input_lengths, at::IntArrayRef target_lengths, int64_t blank, int64_t reduction, bool zero_infinity) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  // No input batched at this level -> call the plain op directly.
  if (!isBatchedAtLevel(log_probs, cur_level) && !isBatchedAtLevel(targets, cur_level)) {
    return at::_ops::ctc_loss_IntList::call(log_probs, targets, input_lengths, target_lengths, blank, reduction, zero_infinity);
  }
  auto [log_probs_value, log_probs_bdim] = unwrapTensorAtLevel(log_probs, cur_level);
  auto [targets_value, targets_bdim] = unwrapTensorAtLevel(targets, cur_level);
  auto results = batch_rule(log_probs_value, log_probs_bdim, targets_value, targets_bdim, input_lengths, target_lengths, blank, reduction, zero_infinity);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// Generated vmap plumbing for aten::ctc_loss.Tensor (lengths as tensors, so
// they participate in batching too).
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor ctc_loss_Tensor_generated_plumbing(const at::Tensor & log_probs, const at::Tensor & targets, const at::Tensor & input_lengths, const at::Tensor & target_lengths, int64_t blank, int64_t reduction, bool zero_infinity) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  // No input batched at this level -> call the plain op directly.
  if (!isBatchedAtLevel(log_probs, cur_level) && !isBatchedAtLevel(targets, cur_level) && !isBatchedAtLevel(input_lengths, cur_level) && !isBatchedAtLevel(target_lengths, cur_level)) {
    return at::_ops::ctc_loss_Tensor::call(log_probs, targets, input_lengths, target_lengths, blank, reduction, zero_infinity);
  }
  auto [log_probs_value, log_probs_bdim] = unwrapTensorAtLevel(log_probs, cur_level);
  auto [targets_value, targets_bdim] = unwrapTensorAtLevel(targets, cur_level);
  auto [input_lengths_value, input_lengths_bdim] = unwrapTensorAtLevel(input_lengths, cur_level);
  auto [target_lengths_value, target_lengths_bdim] = unwrapTensorAtLevel(target_lengths, cur_level);
  auto results = batch_rule(log_probs_value, log_probs_bdim, targets_value, targets_bdim, input_lengths_value, input_lengths_bdim, target_lengths_value, target_lengths_bdim, blank, reduction, zero_infinity);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// Generated vmap plumbing for aten::_ctc_loss (two outputs).
template <typename batch_rule_t, batch_rule_t batch_rule>
::std::tuple<at::Tensor,at::Tensor> _ctc_loss_generated_plumbing(const at::Tensor & log_probs, const at::Tensor & targets, at::IntArrayRef input_lengths, at::IntArrayRef target_lengths, int64_t blank, bool zero_infinity) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  // No input batched at this level -> call the plain op directly.
  if (!isBatchedAtLevel(log_probs, cur_level) && !isBatchedAtLevel(targets, cur_level)) {
    return at::_ops::_ctc_loss::call(log_probs, targets, input_lengths, target_lengths, blank, zero_infinity);
  }
  auto [log_probs_value, log_probs_bdim] = unwrapTensorAtLevel(log_probs, cur_level);
  auto [targets_value, targets_bdim] = unwrapTensorAtLevel(targets, cur_level);
  auto results = batch_rule(log_probs_value, log_probs_bdim, targets_value, targets_bdim, input_lengths, target_lengths, blank, zero_infinity);
  return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
}
// Generated vmap plumbing for aten::_ctc_loss.Tensor (tensor lengths, two outputs).
template <typename batch_rule_t, batch_rule_t batch_rule>
::std::tuple<at::Tensor,at::Tensor> _ctc_loss_Tensor_generated_plumbing(const at::Tensor & log_probs, const at::Tensor & targets, const at::Tensor & input_lengths, const at::Tensor & target_lengths, int64_t blank, bool zero_infinity) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  // No input batched at this level -> call the plain op directly.
  if (!isBatchedAtLevel(log_probs, cur_level) && !isBatchedAtLevel(targets, cur_level) && !isBatchedAtLevel(input_lengths, cur_level) && !isBatchedAtLevel(target_lengths, cur_level)) {
    return at::_ops::_ctc_loss_Tensor::call(log_probs, targets, input_lengths, target_lengths, blank, zero_infinity);
  }
  auto [log_probs_value, log_probs_bdim] = unwrapTensorAtLevel(log_probs, cur_level);
  auto [targets_value, targets_bdim] = unwrapTensorAtLevel(targets, cur_level);
  auto [input_lengths_value, input_lengths_bdim] = unwrapTensorAtLevel(input_lengths, cur_level);
  auto [target_lengths_value, target_lengths_bdim] = unwrapTensorAtLevel(target_lengths, cur_level);
  auto results = batch_rule(log_probs_value, log_probs_bdim, targets_value, targets_bdim, input_lengths_value, input_lengths_bdim, target_lengths_value, target_lengths_bdim, blank, zero_infinity);
  return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
}
// Generated vmap plumbing for aten::_ctc_loss_backward.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor _ctc_loss_backward_generated_plumbing(const at::Tensor & grad, const at::Tensor & log_probs, const at::Tensor & targets, at::IntArrayRef input_lengths, at::IntArrayRef target_lengths, const at::Tensor & neg_log_likelihood, const at::Tensor & log_alpha, int64_t blank, bool zero_infinity) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  // No input batched at this level -> call the plain op directly.
  if (!isBatchedAtLevel(grad, cur_level) && !isBatchedAtLevel(log_probs, cur_level) && !isBatchedAtLevel(targets, cur_level) && !isBatchedAtLevel(neg_log_likelihood, cur_level) && !isBatchedAtLevel(log_alpha, cur_level)) {
    return at::_ops::_ctc_loss_backward::call(grad, log_probs, targets, input_lengths, target_lengths, neg_log_likelihood, log_alpha, blank, zero_infinity);
  }
  auto [grad_value, grad_bdim] = unwrapTensorAtLevel(grad, cur_level);
  auto [log_probs_value, log_probs_bdim] = unwrapTensorAtLevel(log_probs, cur_level);
  auto [targets_value, targets_bdim] = unwrapTensorAtLevel(targets, cur_level);
  auto [neg_log_likelihood_value, neg_log_likelihood_bdim] = unwrapTensorAtLevel(neg_log_likelihood, cur_level);
  auto [log_alpha_value, log_alpha_bdim] = unwrapTensorAtLevel(log_alpha, cur_level);
  auto results = batch_rule(grad_value, grad_bdim, log_probs_value, log_probs_bdim, targets_value, targets_bdim, input_lengths, target_lengths, neg_log_likelihood_value, neg_log_likelihood_bdim, log_alpha_value, log_alpha_bdim, blank, zero_infinity);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
3967 template <typename batch_rule_t, batch_rule_t batch_rule>
3968 at::Tensor _ctc_loss_backward_Tensor_generated_plumbing(const at::Tensor & grad, const at::Tensor & log_probs, const at::Tensor & targets, const at::Tensor & input_lengths, const at::Tensor & target_lengths, const at::Tensor & neg_log_likelihood, const at::Tensor & log_alpha, int64_t blank, bool zero_infinity) {
3969   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
3970   auto maybe_layer = maybeCurrentDynamicLayer();
3971   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
3972   int64_t cur_level = maybe_layer->layerId();
3973   if (!isBatchedAtLevel(grad, cur_level) && !isBatchedAtLevel(log_probs, cur_level) && !isBatchedAtLevel(targets, cur_level) && !isBatchedAtLevel(input_lengths, cur_level) && !isBatchedAtLevel(target_lengths, cur_level) && !isBatchedAtLevel(neg_log_likelihood, cur_level) && !isBatchedAtLevel(log_alpha, cur_level)) {
3974     return at::_ops::_ctc_loss_backward_Tensor::call(grad, log_probs, targets, input_lengths, target_lengths, neg_log_likelihood, log_alpha, blank, zero_infinity);
3975   }
3976   auto [grad_value, grad_bdim] = unwrapTensorAtLevel(grad, cur_level);
3977   auto [log_probs_value, log_probs_bdim] = unwrapTensorAtLevel(log_probs, cur_level);
3978   auto [targets_value, targets_bdim] = unwrapTensorAtLevel(targets, cur_level);
3979   auto [input_lengths_value, input_lengths_bdim] = unwrapTensorAtLevel(input_lengths, cur_level);
3980   auto [target_lengths_value, target_lengths_bdim] = unwrapTensorAtLevel(target_lengths, cur_level);
3981   auto [neg_log_likelihood_value, neg_log_likelihood_bdim] = unwrapTensorAtLevel(neg_log_likelihood, cur_level);
3982   auto [log_alpha_value, log_alpha_bdim] = unwrapTensorAtLevel(log_alpha, cur_level);
3983   auto results = batch_rule(grad_value, grad_bdim, log_probs_value, log_probs_bdim, targets_value, targets_bdim, input_lengths_value, input_lengths_bdim, target_lengths_value, target_lengths_bdim, neg_log_likelihood_value, neg_log_likelihood_bdim, log_alpha_value, log_alpha_bdim, blank, zero_infinity);
3984   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
3985 }
3986 template <typename batch_rule_t, batch_rule_t batch_rule>
3987 at::Tensor diag_embed_generated_plumbing(const at::Tensor & self, int64_t offset, int64_t dim1, int64_t dim2) {
3988   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
3989   auto maybe_layer = maybeCurrentDynamicLayer();
3990   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
3991   int64_t cur_level = maybe_layer->layerId();
3992   if (!isBatchedAtLevel(self, cur_level)) {
3993     return at::_ops::diag_embed::call(self, offset, dim1, dim2);
3994   }
3995   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
3996   auto results = batch_rule(self_value, self_bdim, offset, dim1, dim2);
3997   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
3998 }
3999 template <typename batch_rule_t, batch_rule_t batch_rule>
4000 at::Tensor diagflat_generated_plumbing(const at::Tensor & self, int64_t offset) {
4001   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
4002   auto maybe_layer = maybeCurrentDynamicLayer();
4003   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
4004   int64_t cur_level = maybe_layer->layerId();
4005   if (!isBatchedAtLevel(self, cur_level)) {
4006     return at::_ops::diagflat::call(self, offset);
4007   }
4008   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
4009   auto results = batch_rule(self_value, self_bdim, offset);
4010   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
4011 }
4012 template <typename batch_rule_t, batch_rule_t batch_rule>
4013 at::Tensor diagonal_generated_plumbing(const at::Tensor & self, int64_t offset, int64_t dim1, int64_t dim2) {
4014   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
4015   auto maybe_layer = maybeCurrentDynamicLayer();
4016   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
4017   int64_t cur_level = maybe_layer->layerId();
4018   if (!isBatchedAtLevel(self, cur_level)) {
4019     return at::_ops::diagonal::call(self, offset, dim1, dim2);
4020   }
4021   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
4022   auto results = batch_rule(self_value, self_bdim, offset, dim1, dim2);
4023   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
4024 }
4025 template <typename batch_rule_t, batch_rule_t batch_rule>
4026 at::Tensor linalg_diagonal_generated_plumbing(const at::Tensor & A, int64_t offset, int64_t dim1, int64_t dim2) {
4027   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
4028   auto maybe_layer = maybeCurrentDynamicLayer();
4029   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
4030   int64_t cur_level = maybe_layer->layerId();
4031   if (!isBatchedAtLevel(A, cur_level)) {
4032     return at::_ops::linalg_diagonal::call(A, offset, dim1, dim2);
4033   }
4034   auto [A_value, A_bdim] = unwrapTensorAtLevel(A, cur_level);
4035   auto results = batch_rule(A_value, A_bdim, offset, dim1, dim2);
4036   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
4037 }
4038 template <typename batch_rule_t, batch_rule_t batch_rule>
4039 at::Tensor diagonal_Dimname_generated_plumbing(const at::Tensor & self, at::Dimname outdim, at::Dimname dim1, at::Dimname dim2, int64_t offset) {
4040   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
4041   auto maybe_layer = maybeCurrentDynamicLayer();
4042   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
4043   int64_t cur_level = maybe_layer->layerId();
4044   if (!isBatchedAtLevel(self, cur_level)) {
4045     return at::_ops::diagonal_Dimname::call(self, outdim, dim1, dim2, offset);
4046   }
4047   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
4048   auto results = batch_rule(self_value, self_bdim, outdim, dim1, dim2, offset);
4049   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
4050 }
4051 template <typename batch_rule_t, batch_rule_t batch_rule>
4052 at::Tensor diagonal_backward_generated_plumbing(const at::Tensor & grad_output, c10::SymIntArrayRef input_sizes, int64_t offset, int64_t dim1, int64_t dim2) {
4053   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
4054   auto maybe_layer = maybeCurrentDynamicLayer();
4055   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
4056   int64_t cur_level = maybe_layer->layerId();
4057   if (!isBatchedAtLevel(grad_output, cur_level)) {
4058     return at::_ops::diagonal_backward::call(grad_output, input_sizes, offset, dim1, dim2);
4059   }
4060   auto [grad_output_value, grad_output_bdim] = unwrapTensorAtLevel(grad_output, cur_level);
4061   auto results = batch_rule(grad_output_value, grad_output_bdim, input_sizes, offset, dim1, dim2);
4062   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
4063 }
4064 template <typename batch_rule_t, batch_rule_t batch_rule>
4065 at::Tensor & fill_diagonal__generated_plumbing(at::Tensor & self, const at::Scalar & fill_value, bool wrap) {
4066   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
4067   auto maybe_layer = maybeCurrentDynamicLayer();
4068   vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
4069   int64_t cur_level = maybe_layer->layerId();
4070   if (!isBatchedAtLevel(self, cur_level)) {
4071     return at::_ops::fill_diagonal_::call(self, fill_value, wrap);
4072   }
4073   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
4074   batch_rule(self_value, self_bdim, fill_value, wrap);
4075   return self;
4076 }
4077 template <typename batch_rule_t, batch_rule_t batch_rule>
4078 at::Tensor diff_generated_plumbing(const at::Tensor & self, int64_t n, int64_t dim, const ::std::optional<at::Tensor> & prepend, const ::std::optional<at::Tensor> & append) {
4079   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
4080   auto maybe_layer = maybeCurrentDynamicLayer();
4081   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
4082   int64_t cur_level = maybe_layer->layerId();
4083   if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(prepend, cur_level) && !isBatchedAtLevel(append, cur_level)) {
4084     return at::_ops::diff::call(self, n, dim, prepend, append);
4085   }
4086   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
4087   std::optional<Tensor> prepend_value;
4088   std::optional<int64_t> prepend_bdim;
4089   if (prepend) {
4090       std::tie(prepend_value, prepend_bdim) = unwrapTensorAtLevel(prepend.value(), cur_level);
4091   }
4092   std::optional<Tensor> append_value;
4093   std::optional<int64_t> append_bdim;
4094   if (append) {
4095       std::tie(append_value, append_bdim) = unwrapTensorAtLevel(append.value(), cur_level);
4096   }
4097   auto results = batch_rule(self_value, self_bdim, n, dim, prepend_value, prepend_bdim, append_value, append_bdim);
4098   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
4099 }
4100 template <typename batch_rule_t, batch_rule_t batch_rule>
4101 ::std::vector<at::Tensor> gradient_scalarint_generated_plumbing(const at::Tensor & self, const ::std::optional<at::Scalar> & spacing, ::std::optional<int64_t> dim, int64_t edge_order) {
4102   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
4103   auto maybe_layer = maybeCurrentDynamicLayer();
4104   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
4105   int64_t cur_level = maybe_layer->layerId();
4106   if (!isBatchedAtLevel(self, cur_level)) {
4107     return at::_ops::gradient_scalarint::call(self, spacing, dim, edge_order);
4108   }
4109   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
4110   auto results = batch_rule(self_value, self_bdim, spacing, dim, edge_order);
4111   return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
4112 }
4113 template <typename batch_rule_t, batch_rule_t batch_rule>
4114 ::std::vector<at::Tensor> gradient_scalararray_generated_plumbing(const at::Tensor & self, const at::Scalar & spacing, at::IntArrayRef dim, int64_t edge_order) {
4115   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
4116   auto maybe_layer = maybeCurrentDynamicLayer();
4117   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
4118   int64_t cur_level = maybe_layer->layerId();
4119   if (!isBatchedAtLevel(self, cur_level)) {
4120     return at::_ops::gradient_scalararray::call(self, spacing, dim, edge_order);
4121   }
4122   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
4123   auto results = batch_rule(self_value, self_bdim, spacing, dim, edge_order);
4124   return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
4125 }
4126 template <typename batch_rule_t, batch_rule_t batch_rule>
4127 ::std::vector<at::Tensor> gradient_array_generated_plumbing(const at::Tensor & self, at::IntArrayRef dim, int64_t edge_order) {
4128   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
4129   auto maybe_layer = maybeCurrentDynamicLayer();
4130   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
4131   int64_t cur_level = maybe_layer->layerId();
4132   if (!isBatchedAtLevel(self, cur_level)) {
4133     return at::_ops::gradient_array::call(self, dim, edge_order);
4134   }
4135   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
4136   auto results = batch_rule(self_value, self_bdim, dim, edge_order);
4137   return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
4138 }
4139 template <typename batch_rule_t, batch_rule_t batch_rule>
4140 ::std::vector<at::Tensor> gradient_scalarrayint_generated_plumbing(const at::Tensor & self, at::ArrayRef<at::Scalar> spacing, ::std::optional<int64_t> dim, int64_t edge_order) {
4141   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
4142   auto maybe_layer = maybeCurrentDynamicLayer();
4143   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
4144   int64_t cur_level = maybe_layer->layerId();
4145   if (!isBatchedAtLevel(self, cur_level)) {
4146     return at::_ops::gradient_scalarrayint::call(self, spacing, dim, edge_order);
4147   }
4148   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
4149   auto results = batch_rule(self_value, self_bdim, spacing, dim, edge_order);
4150   return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
4151 }
4152 template <typename batch_rule_t, batch_rule_t batch_rule>
4153 ::std::vector<at::Tensor> gradient_scalarrayarray_generated_plumbing(const at::Tensor & self, at::ArrayRef<at::Scalar> spacing, at::IntArrayRef dim, int64_t edge_order) {
4154   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
4155   auto maybe_layer = maybeCurrentDynamicLayer();
4156   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
4157   int64_t cur_level = maybe_layer->layerId();
4158   if (!isBatchedAtLevel(self, cur_level)) {
4159     return at::_ops::gradient_scalarrayarray::call(self, spacing, dim, edge_order);
4160   }
4161   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
4162   auto results = batch_rule(self_value, self_bdim, spacing, dim, edge_order);
4163   return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
4164 }
4165 template <typename batch_rule_t, batch_rule_t batch_rule>
4166 ::std::vector<at::Tensor> gradient_tensorarrayint_generated_plumbing(const at::Tensor & self, at::TensorList spacing, ::std::optional<int64_t> dim, int64_t edge_order) {
4167   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
4168   auto maybe_layer = maybeCurrentDynamicLayer();
4169   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
4170   int64_t cur_level = maybe_layer->layerId();
4171   if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(spacing, cur_level)) {
4172     return at::_ops::gradient_tensorarrayint::call(self, spacing, dim, edge_order);
4173   }
4174   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
4175   auto results = batch_rule(self_value, self_bdim, spacing, dim, edge_order);
4176   return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
4177 }
4178 template <typename batch_rule_t, batch_rule_t batch_rule>
4179 ::std::vector<at::Tensor> gradient_tensorarray_generated_plumbing(const at::Tensor & self, at::TensorList spacing, at::IntArrayRef dim, int64_t edge_order) {
4180   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
4181   auto maybe_layer = maybeCurrentDynamicLayer();
4182   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
4183   int64_t cur_level = maybe_layer->layerId();
4184   if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(spacing, cur_level)) {
4185     return at::_ops::gradient_tensorarray::call(self, spacing, dim, edge_order);
4186   }
4187   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
4188   auto results = batch_rule(self_value, self_bdim, spacing, dim, edge_order);
4189   return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
4190 }
4191 template <typename batch_rule_t, batch_rule_t batch_rule>
4192 at::Tensor div_Tensor_generated_plumbing(const at::Tensor & self, const at::Tensor & other) {
4193   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
4194   auto maybe_layer = maybeCurrentDynamicLayer();
4195   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
4196   int64_t cur_level = maybe_layer->layerId();
4197   if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
4198     return at::_ops::div_Tensor::call(self, other);
4199   }
4200   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
4201   auto [other_value, other_bdim] = unwrapTensorAtLevel(other, cur_level);
4202   auto results = batch_rule(self_value, self_bdim, other_value, other_bdim);
4203   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
4204 }
4205 template <typename batch_rule_t, batch_rule_t batch_rule>
4206 at::Tensor & div__Tensor_generated_plumbing(at::Tensor & self, const at::Tensor & other) {
4207   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
4208   auto maybe_layer = maybeCurrentDynamicLayer();
4209   vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
4210   int64_t cur_level = maybe_layer->layerId();
4211   if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
4212     return at::_ops::div__Tensor::call(self, other);
4213   }
4214   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
4215   auto [other_value, other_bdim] = unwrapTensorAtLevel(other, cur_level);
4216   batch_rule(self_value, self_bdim, other_value, other_bdim);
4217   return self;
4218 }
4219 template <typename batch_rule_t, batch_rule_t batch_rule>
4220 at::Tensor div_Tensor_mode_generated_plumbing(const at::Tensor & self, const at::Tensor & other, ::std::optional<c10::string_view> rounding_mode) {
4221   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
4222   auto maybe_layer = maybeCurrentDynamicLayer();
4223   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
4224   int64_t cur_level = maybe_layer->layerId();
4225   if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
4226     return at::_ops::div_Tensor_mode::call(self, other, rounding_mode);
4227   }
4228   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
4229   auto [other_value, other_bdim] = unwrapTensorAtLevel(other, cur_level);
4230   auto results = batch_rule(self_value, self_bdim, other_value, other_bdim, rounding_mode);
4231   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
4232 }
4233 template <typename batch_rule_t, batch_rule_t batch_rule>
4234 at::Tensor & div__Tensor_mode_generated_plumbing(at::Tensor & self, const at::Tensor & other, ::std::optional<c10::string_view> rounding_mode) {
4235   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
4236   auto maybe_layer = maybeCurrentDynamicLayer();
4237   vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
4238   int64_t cur_level = maybe_layer->layerId();
4239   if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
4240     return at::_ops::div__Tensor_mode::call(self, other, rounding_mode);
4241   }
4242   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
4243   auto [other_value, other_bdim] = unwrapTensorAtLevel(other, cur_level);
4244   batch_rule(self_value, self_bdim, other_value, other_bdim, rounding_mode);
4245   return self;
4246 }
4247 template <typename batch_rule_t, batch_rule_t batch_rule>
4248 at::Tensor div_Scalar_generated_plumbing(const at::Tensor & self, const at::Scalar & other) {
4249   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
4250   auto maybe_layer = maybeCurrentDynamicLayer();
4251   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
4252   int64_t cur_level = maybe_layer->layerId();
4253   if (!isBatchedAtLevel(self, cur_level)) {
4254     return at::_ops::div_Scalar::call(self, other);
4255   }
4256   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
4257   auto results = batch_rule(self_value, self_bdim, other);
4258   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
4259 }
4260 template <typename batch_rule_t, batch_rule_t batch_rule>
4261 at::Tensor & div__Scalar_generated_plumbing(at::Tensor & self, const at::Scalar & other) {
4262   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
4263   auto maybe_layer = maybeCurrentDynamicLayer();
4264   vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
4265   int64_t cur_level = maybe_layer->layerId();
4266   if (!isBatchedAtLevel(self, cur_level)) {
4267     return at::_ops::div__Scalar::call(self, other);
4268   }
4269   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
4270   batch_rule(self_value, self_bdim, other);
4271   return self;
4272 }
4273 template <typename batch_rule_t, batch_rule_t batch_rule>
4274 at::Tensor div_Scalar_mode_generated_plumbing(const at::Tensor & self, const at::Scalar & other, ::std::optional<c10::string_view> rounding_mode) {
4275   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
4276   auto maybe_layer = maybeCurrentDynamicLayer();
4277   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
4278   int64_t cur_level = maybe_layer->layerId();
4279   if (!isBatchedAtLevel(self, cur_level)) {
4280     return at::_ops::div_Scalar_mode::call(self, other, rounding_mode);
4281   }
4282   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
4283   auto results = batch_rule(self_value, self_bdim, other, rounding_mode);
4284   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
4285 }
4286 template <typename batch_rule_t, batch_rule_t batch_rule>
4287 at::Tensor & div__Scalar_mode_generated_plumbing(at::Tensor & self, const at::Scalar & other, ::std::optional<c10::string_view> rounding_mode) {
4288   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
4289   auto maybe_layer = maybeCurrentDynamicLayer();
4290   vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
4291   int64_t cur_level = maybe_layer->layerId();
4292   if (!isBatchedAtLevel(self, cur_level)) {
4293     return at::_ops::div__Scalar_mode::call(self, other, rounding_mode);
4294   }
4295   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
4296   batch_rule(self_value, self_bdim, other, rounding_mode);
4297   return self;
4298 }
4299 template <typename batch_rule_t, batch_rule_t batch_rule>
4300 at::Tensor divide_Tensor_generated_plumbing(const at::Tensor & self, const at::Tensor & other) {
4301   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
4302   auto maybe_layer = maybeCurrentDynamicLayer();
4303   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
4304   int64_t cur_level = maybe_layer->layerId();
4305   if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
4306     return at::_ops::divide_Tensor::call(self, other);
4307   }
4308   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
4309   auto [other_value, other_bdim] = unwrapTensorAtLevel(other, cur_level);
4310   auto results = batch_rule(self_value, self_bdim, other_value, other_bdim);
4311   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
4312 }
4313 template <typename batch_rule_t, batch_rule_t batch_rule>
4314 at::Tensor & divide__Tensor_generated_plumbing(at::Tensor & self, const at::Tensor & other) {
4315   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
4316   auto maybe_layer = maybeCurrentDynamicLayer();
4317   vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
4318   int64_t cur_level = maybe_layer->layerId();
4319   if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
4320     return at::_ops::divide__Tensor::call(self, other);
4321   }
4322   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
4323   auto [other_value, other_bdim] = unwrapTensorAtLevel(other, cur_level);
4324   batch_rule(self_value, self_bdim, other_value, other_bdim);
4325   return self;
4326 }
4327 template <typename batch_rule_t, batch_rule_t batch_rule>
4328 at::Tensor divide_Scalar_generated_plumbing(const at::Tensor & self, const at::Scalar & other) {
4329   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
4330   auto maybe_layer = maybeCurrentDynamicLayer();
4331   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
4332   int64_t cur_level = maybe_layer->layerId();
4333   if (!isBatchedAtLevel(self, cur_level)) {
4334     return at::_ops::divide_Scalar::call(self, other);
4335   }
4336   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
4337   auto results = batch_rule(self_value, self_bdim, other);
4338   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
4339 }
4340 template <typename batch_rule_t, batch_rule_t batch_rule>
4341 at::Tensor & divide__Scalar_generated_plumbing(at::Tensor & self, const at::Scalar & other) {
4342   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
4343   auto maybe_layer = maybeCurrentDynamicLayer();
4344   vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
4345   int64_t cur_level = maybe_layer->layerId();
4346   if (!isBatchedAtLevel(self, cur_level)) {
4347     return at::_ops::divide__Scalar::call(self, other);
4348   }
4349   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
4350   batch_rule(self_value, self_bdim, other);
4351   return self;
4352 }
4353 template <typename batch_rule_t, batch_rule_t batch_rule>
4354 at::Tensor divide_Tensor_mode_generated_plumbing(const at::Tensor & self, const at::Tensor & other, ::std::optional<c10::string_view> rounding_mode) {
4355   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
4356   auto maybe_layer = maybeCurrentDynamicLayer();
4357   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
4358   int64_t cur_level = maybe_layer->layerId();
4359   if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
4360     return at::_ops::divide_Tensor_mode::call(self, other, rounding_mode);
4361   }
4362   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
4363   auto [other_value, other_bdim] = unwrapTensorAtLevel(other, cur_level);
4364   auto results = batch_rule(self_value, self_bdim, other_value, other_bdim, rounding_mode);
4365   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
4366 }
4367 template <typename batch_rule_t, batch_rule_t batch_rule>
4368 at::Tensor & divide__Tensor_mode_generated_plumbing(at::Tensor & self, const at::Tensor & other, ::std::optional<c10::string_view> rounding_mode) {
4369   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
4370   auto maybe_layer = maybeCurrentDynamicLayer();
4371   vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
4372   int64_t cur_level = maybe_layer->layerId();
4373   if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
4374     return at::_ops::divide__Tensor_mode::call(self, other, rounding_mode);
4375   }
4376   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
4377   auto [other_value, other_bdim] = unwrapTensorAtLevel(other, cur_level);
4378   batch_rule(self_value, self_bdim, other_value, other_bdim, rounding_mode);
4379   return self;
4380 }
4381 template <typename batch_rule_t, batch_rule_t batch_rule>
4382 at::Tensor divide_Scalar_mode_generated_plumbing(const at::Tensor & self, const at::Scalar & other, ::std::optional<c10::string_view> rounding_mode) {
4383   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
4384   auto maybe_layer = maybeCurrentDynamicLayer();
4385   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
4386   int64_t cur_level = maybe_layer->layerId();
4387   if (!isBatchedAtLevel(self, cur_level)) {
4388     return at::_ops::divide_Scalar_mode::call(self, other, rounding_mode);
4389   }
4390   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
4391   auto results = batch_rule(self_value, self_bdim, other, rounding_mode);
4392   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
4393 }
4394 template <typename batch_rule_t, batch_rule_t batch_rule>
4395 at::Tensor & divide__Scalar_mode_generated_plumbing(at::Tensor & self, const at::Scalar & other, ::std::optional<c10::string_view> rounding_mode) {
4396   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
4397   auto maybe_layer = maybeCurrentDynamicLayer();
4398   vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
4399   int64_t cur_level = maybe_layer->layerId();
4400   if (!isBatchedAtLevel(self, cur_level)) {
4401     return at::_ops::divide__Scalar_mode::call(self, other, rounding_mode);
4402   }
4403   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
4404   batch_rule(self_value, self_bdim, other, rounding_mode);
4405   return self;
4406 }
4407 template <typename batch_rule_t, batch_rule_t batch_rule>
4408 at::Tensor true_divide_Tensor_generated_plumbing(const at::Tensor & self, const at::Tensor & other) {
4409   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
4410   auto maybe_layer = maybeCurrentDynamicLayer();
4411   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
4412   int64_t cur_level = maybe_layer->layerId();
4413   if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
4414     return at::_ops::true_divide_Tensor::call(self, other);
4415   }
4416   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
4417   auto [other_value, other_bdim] = unwrapTensorAtLevel(other, cur_level);
4418   auto results = batch_rule(self_value, self_bdim, other_value, other_bdim);
4419   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
4420 }
4421 template <typename batch_rule_t, batch_rule_t batch_rule>
4422 at::Tensor & true_divide__Tensor_generated_plumbing(at::Tensor & self, const at::Tensor & other) {
4423   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
4424   auto maybe_layer = maybeCurrentDynamicLayer();
4425   vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
4426   int64_t cur_level = maybe_layer->layerId();
4427   if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
4428     return at::_ops::true_divide__Tensor::call(self, other);
4429   }
4430   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
4431   auto [other_value, other_bdim] = unwrapTensorAtLevel(other, cur_level);
4432   batch_rule(self_value, self_bdim, other_value, other_bdim);
4433   return self;
4434 }
4435 template <typename batch_rule_t, batch_rule_t batch_rule>
4436 at::Tensor true_divide_Scalar_generated_plumbing(const at::Tensor & self, const at::Scalar & other) {
4437   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
4438   auto maybe_layer = maybeCurrentDynamicLayer();
4439   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
4440   int64_t cur_level = maybe_layer->layerId();
4441   if (!isBatchedAtLevel(self, cur_level)) {
4442     return at::_ops::true_divide_Scalar::call(self, other);
4443   }
4444   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
4445   auto results = batch_rule(self_value, self_bdim, other);
4446   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
4447 }
// Generated vmap plumbing for the in-place op aten::true_divide_.Scalar.
// Fast path when self is not batched at the current level; otherwise the
// unwrapped value is mutated in place by batch_rule and self is returned.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor & true_divide__Scalar_generated_plumbing(at::Tensor & self, const at::Scalar & other) {
  // Exclude FuncTorchBatched for the duration of this call.
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::true_divide__Scalar::call(self, other);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  batch_rule(self_value, self_bdim, other);
  return self;
}
// Generated vmap plumbing for aten::dot: unwraps both batched inputs at the
// current vmap level, dispatches to batch_rule, and re-wraps the result.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor dot_generated_plumbing(const at::Tensor & self, const at::Tensor & tensor) {
  // Exclude FuncTorchBatched for the duration of this call.
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(tensor, cur_level)) {
    return at::_ops::dot::call(self, tensor);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto [tensor_value, tensor_bdim] = unwrapTensorAtLevel(tensor, cur_level);
  auto results = batch_rule(self_value, self_bdim, tensor_value, tensor_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// Generated vmap plumbing for aten::vdot: unwraps both batched inputs at the
// current vmap level, dispatches to batch_rule, and re-wraps the result.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor vdot_generated_plumbing(const at::Tensor & self, const at::Tensor & other) {
  // Exclude FuncTorchBatched for the duration of this call.
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
    return at::_ops::vdot::call(self, other);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto [other_value, other_bdim] = unwrapTensorAtLevel(other, cur_level);
  auto results = batch_rule(self_value, self_bdim, other_value, other_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// Generated vmap plumbing for aten::einsum. The TensorList is forwarded to
// batch_rule as-is (no per-tensor unwrapping happens here); only the batched
// check and the re-wrap of the single result are done in this shim.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor einsum_generated_plumbing(c10::string_view equation, at::TensorList tensors, at::OptionalIntArrayRef path) {
  // Exclude FuncTorchBatched for the duration of this call.
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(tensors, cur_level)) {
    return at::_ops::einsum::call(equation, tensors, path);
  }

  auto results = batch_rule(equation, tensors, path);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// Generated vmap plumbing for aten::embedding: unwraps weight and indices at
// the current vmap level, dispatches to batch_rule, and re-wraps the result.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor embedding_generated_plumbing(const at::Tensor & weight, const at::Tensor & indices, c10::SymInt padding_idx, bool scale_grad_by_freq, bool sparse) {
  // Exclude FuncTorchBatched for the duration of this call.
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(indices, cur_level)) {
    return at::_ops::embedding::call(weight, indices, padding_idx, scale_grad_by_freq, sparse);
  }
  auto [weight_value, weight_bdim] = unwrapTensorAtLevel(weight, cur_level);
  auto [indices_value, indices_bdim] = unwrapTensorAtLevel(indices, cur_level);
  auto results = batch_rule(weight_value, weight_bdim, indices_value, indices_bdim, padding_idx, scale_grad_by_freq, sparse);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// Generated vmap plumbing for aten::embedding_backward: unwraps grad and
// indices at the current vmap level, dispatches to batch_rule, re-wraps.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor embedding_backward_generated_plumbing(const at::Tensor & grad, const at::Tensor & indices, c10::SymInt num_weights, c10::SymInt padding_idx, bool scale_grad_by_freq, bool sparse) {
  // Exclude FuncTorchBatched for the duration of this call.
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(grad, cur_level) && !isBatchedAtLevel(indices, cur_level)) {
    return at::_ops::embedding_backward::call(grad, indices, num_weights, padding_idx, scale_grad_by_freq, sparse);
  }
  auto [grad_value, grad_bdim] = unwrapTensorAtLevel(grad, cur_level);
  auto [indices_value, indices_bdim] = unwrapTensorAtLevel(indices, cur_level);
  auto results = batch_rule(grad_value, grad_bdim, indices_value, indices_bdim, num_weights, padding_idx, scale_grad_by_freq, sparse);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// Generated vmap plumbing for aten::embedding_dense_backward: unwraps
// grad_output and indices at the current level, dispatches, re-wraps.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor embedding_dense_backward_generated_plumbing(const at::Tensor & grad_output, const at::Tensor & indices, c10::SymInt num_weights, c10::SymInt padding_idx, bool scale_grad_by_freq) {
  // Exclude FuncTorchBatched for the duration of this call.
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(indices, cur_level)) {
    return at::_ops::embedding_dense_backward::call(grad_output, indices, num_weights, padding_idx, scale_grad_by_freq);
  }
  auto [grad_output_value, grad_output_bdim] = unwrapTensorAtLevel(grad_output, cur_level);
  auto [indices_value, indices_bdim] = unwrapTensorAtLevel(indices, cur_level);
  auto results = batch_rule(grad_output_value, grad_output_bdim, indices_value, indices_bdim, num_weights, padding_idx, scale_grad_by_freq);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// Generated vmap plumbing for the in-place op aten::embedding_renorm_.
// Unwraps self and indices; batch_rule mutates self's underlying value in
// place, and the original (still-batched) self is returned.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor & embedding_renorm__generated_plumbing(at::Tensor & self, const at::Tensor & indices, double max_norm, double norm_type) {
  // Exclude FuncTorchBatched for the duration of this call.
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(indices, cur_level)) {
    return at::_ops::embedding_renorm_::call(self, indices, max_norm, norm_type);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto [indices_value, indices_bdim] = unwrapTensorAtLevel(indices, cur_level);
  batch_rule(self_value, self_bdim, indices_value, indices_bdim, max_norm, norm_type);
  return self;
}
// Generated vmap plumbing for aten::embedding_sparse_backward: unwraps grad
// and indices at the current vmap level, dispatches to batch_rule, re-wraps.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor embedding_sparse_backward_generated_plumbing(const at::Tensor & grad, const at::Tensor & indices, int64_t num_weights, int64_t padding_idx, bool scale_grad_by_freq) {
  // Exclude FuncTorchBatched for the duration of this call.
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(grad, cur_level) && !isBatchedAtLevel(indices, cur_level)) {
    return at::_ops::embedding_sparse_backward::call(grad, indices, num_weights, padding_idx, scale_grad_by_freq);
  }
  auto [grad_value, grad_bdim] = unwrapTensorAtLevel(grad, cur_level);
  auto [indices_value, indices_bdim] = unwrapTensorAtLevel(indices, cur_level);
  auto results = batch_rule(grad_value, grad_bdim, indices_value, indices_bdim, num_weights, padding_idx, scale_grad_by_freq);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// Generated vmap plumbing for aten::_embedding_bag_forward_only.
// The optional per_sample_weights is only unwrapped when present; all four
// (tensor, bdim) pairs returned by batch_rule are re-wrapped at cur_level.
template <typename batch_rule_t, batch_rule_t batch_rule>
::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor> _embedding_bag_forward_only_generated_plumbing(const at::Tensor & weight, const at::Tensor & indices, const at::Tensor & offsets, bool scale_grad_by_freq, int64_t mode, bool sparse, const ::std::optional<at::Tensor> & per_sample_weights, bool include_last_offset, int64_t padding_idx) {
  // Exclude FuncTorchBatched for the duration of this call.
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(indices, cur_level) && !isBatchedAtLevel(offsets, cur_level) && !isBatchedAtLevel(per_sample_weights, cur_level)) {
    return at::_ops::_embedding_bag_forward_only::call(weight, indices, offsets, scale_grad_by_freq, mode, sparse, per_sample_weights, include_last_offset, padding_idx);
  }
  auto [weight_value, weight_bdim] = unwrapTensorAtLevel(weight, cur_level);
  auto [indices_value, indices_bdim] = unwrapTensorAtLevel(indices, cur_level);
  auto [offsets_value, offsets_bdim] = unwrapTensorAtLevel(offsets, cur_level);
  // Absent optional maps to (nullopt value, nullopt bdim).
  std::optional<Tensor> per_sample_weights_value;
  std::optional<int64_t> per_sample_weights_bdim;
  if (per_sample_weights) {
      std::tie(per_sample_weights_value, per_sample_weights_bdim) = unwrapTensorAtLevel(per_sample_weights.value(), cur_level);
  }
  auto results = batch_rule(weight_value, weight_bdim, indices_value, indices_bdim, offsets_value, offsets_bdim, scale_grad_by_freq, mode, sparse, per_sample_weights_value, per_sample_weights_bdim, include_last_offset, padding_idx);
  return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level), makeBatched(std::get<6>(results), std::get<7>(results), cur_level));
}
// Generated vmap plumbing for aten::_rowwise_prune: unwraps weight and mask
// at the current vmap level and re-wraps both tensors of the result tuple.
template <typename batch_rule_t, batch_rule_t batch_rule>
::std::tuple<at::Tensor,at::Tensor> _rowwise_prune_generated_plumbing(const at::Tensor & weight, const at::Tensor & mask, at::ScalarType compressed_indices_dtype) {
  // Exclude FuncTorchBatched for the duration of this call.
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(mask, cur_level)) {
    return at::_ops::_rowwise_prune::call(weight, mask, compressed_indices_dtype);
  }
  auto [weight_value, weight_bdim] = unwrapTensorAtLevel(weight, cur_level);
  auto [mask_value, mask_bdim] = unwrapTensorAtLevel(mask, cur_level);
  auto results = batch_rule(weight_value, weight_bdim, mask_value, mask_bdim, compressed_indices_dtype);
  return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
}
// Generated vmap plumbing for aten::row_stack. The TensorList is forwarded
// to batch_rule as-is (no per-tensor unwrapping in this shim); the single
// result is re-wrapped at the current vmap level.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor row_stack_generated_plumbing(at::TensorList tensors) {
  // Exclude FuncTorchBatched for the duration of this call.
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(tensors, cur_level)) {
    return at::_ops::row_stack::call(tensors);
  }

  auto results = batch_rule(tensors);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// Generated vmap plumbing for aten::embedding_bag. The optional
// per_sample_weights is only unwrapped when present; all four result tensors
// are re-wrapped at the current vmap level.
template <typename batch_rule_t, batch_rule_t batch_rule>
::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor> embedding_bag_generated_plumbing(const at::Tensor & weight, const at::Tensor & indices, const at::Tensor & offsets, bool scale_grad_by_freq, int64_t mode, bool sparse, const ::std::optional<at::Tensor> & per_sample_weights, bool include_last_offset) {
  // Exclude FuncTorchBatched for the duration of this call.
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(indices, cur_level) && !isBatchedAtLevel(offsets, cur_level) && !isBatchedAtLevel(per_sample_weights, cur_level)) {
    return at::_ops::embedding_bag::call(weight, indices, offsets, scale_grad_by_freq, mode, sparse, per_sample_weights, include_last_offset);
  }
  auto [weight_value, weight_bdim] = unwrapTensorAtLevel(weight, cur_level);
  auto [indices_value, indices_bdim] = unwrapTensorAtLevel(indices, cur_level);
  auto [offsets_value, offsets_bdim] = unwrapTensorAtLevel(offsets, cur_level);
  // Absent optional maps to (nullopt value, nullopt bdim).
  std::optional<Tensor> per_sample_weights_value;
  std::optional<int64_t> per_sample_weights_bdim;
  if (per_sample_weights) {
      std::tie(per_sample_weights_value, per_sample_weights_bdim) = unwrapTensorAtLevel(per_sample_weights.value(), cur_level);
  }
  auto results = batch_rule(weight_value, weight_bdim, indices_value, indices_bdim, offsets_value, offsets_bdim, scale_grad_by_freq, mode, sparse, per_sample_weights_value, per_sample_weights_bdim, include_last_offset);
  return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level), makeBatched(std::get<6>(results), std::get<7>(results), cur_level));
}
// Generated vmap plumbing for aten::embedding_bag.padding_idx. Same shape as
// the embedding_bag plumbing, plus the optional padding_idx passthrough; all
// four result tensors are re-wrapped at the current vmap level.
template <typename batch_rule_t, batch_rule_t batch_rule>
::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor> embedding_bag_padding_idx_generated_plumbing(const at::Tensor & weight, const at::Tensor & indices, const at::Tensor & offsets, bool scale_grad_by_freq, int64_t mode, bool sparse, const ::std::optional<at::Tensor> & per_sample_weights, bool include_last_offset, ::std::optional<int64_t> padding_idx) {
  // Exclude FuncTorchBatched for the duration of this call.
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(indices, cur_level) && !isBatchedAtLevel(offsets, cur_level) && !isBatchedAtLevel(per_sample_weights, cur_level)) {
    return at::_ops::embedding_bag_padding_idx::call(weight, indices, offsets, scale_grad_by_freq, mode, sparse, per_sample_weights, include_last_offset, padding_idx);
  }
  auto [weight_value, weight_bdim] = unwrapTensorAtLevel(weight, cur_level);
  auto [indices_value, indices_bdim] = unwrapTensorAtLevel(indices, cur_level);
  auto [offsets_value, offsets_bdim] = unwrapTensorAtLevel(offsets, cur_level);
  // Absent optional maps to (nullopt value, nullopt bdim).
  std::optional<Tensor> per_sample_weights_value;
  std::optional<int64_t> per_sample_weights_bdim;
  if (per_sample_weights) {
      std::tie(per_sample_weights_value, per_sample_weights_bdim) = unwrapTensorAtLevel(per_sample_weights.value(), cur_level);
  }
  auto results = batch_rule(weight_value, weight_bdim, indices_value, indices_bdim, offsets_value, offsets_bdim, scale_grad_by_freq, mode, sparse, per_sample_weights_value, per_sample_weights_bdim, include_last_offset, padding_idx);
  return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level), makeBatched(std::get<6>(results), std::get<7>(results), cur_level));
}
// Generated vmap plumbing for aten::_embedding_bag. The optional
// per_sample_weights is only unwrapped when present; all four result tensors
// are re-wrapped at the current vmap level.
template <typename batch_rule_t, batch_rule_t batch_rule>
::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor> _embedding_bag_generated_plumbing(const at::Tensor & weight, const at::Tensor & indices, const at::Tensor & offsets, bool scale_grad_by_freq, int64_t mode, bool sparse, const ::std::optional<at::Tensor> & per_sample_weights, bool include_last_offset, int64_t padding_idx) {
  // Exclude FuncTorchBatched for the duration of this call.
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(indices, cur_level) && !isBatchedAtLevel(offsets, cur_level) && !isBatchedAtLevel(per_sample_weights, cur_level)) {
    return at::_ops::_embedding_bag::call(weight, indices, offsets, scale_grad_by_freq, mode, sparse, per_sample_weights, include_last_offset, padding_idx);
  }
  auto [weight_value, weight_bdim] = unwrapTensorAtLevel(weight, cur_level);
  auto [indices_value, indices_bdim] = unwrapTensorAtLevel(indices, cur_level);
  auto [offsets_value, offsets_bdim] = unwrapTensorAtLevel(offsets, cur_level);
  // Absent optional maps to (nullopt value, nullopt bdim).
  std::optional<Tensor> per_sample_weights_value;
  std::optional<int64_t> per_sample_weights_bdim;
  if (per_sample_weights) {
      std::tie(per_sample_weights_value, per_sample_weights_bdim) = unwrapTensorAtLevel(per_sample_weights.value(), cur_level);
  }
  auto results = batch_rule(weight_value, weight_bdim, indices_value, indices_bdim, offsets_value, offsets_bdim, scale_grad_by_freq, mode, sparse, per_sample_weights_value, per_sample_weights_bdim, include_last_offset, padding_idx);
  return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level), makeBatched(std::get<6>(results), std::get<7>(results), cur_level));
}
// Generated vmap plumbing for aten::_embedding_bag_backward: unwraps the six
// required tensors plus the optional per_sample_weights (only when present),
// dispatches to batch_rule, and re-wraps the single gradient result.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor _embedding_bag_backward_generated_plumbing(const at::Tensor & grad, const at::Tensor & indices, const at::Tensor & offsets, const at::Tensor & offset2bag, const at::Tensor & bag_size, const at::Tensor & maximum_indices, c10::SymInt num_weights, bool scale_grad_by_freq, int64_t mode, bool sparse, const ::std::optional<at::Tensor> & per_sample_weights, int64_t padding_idx) {
  // Exclude FuncTorchBatched for the duration of this call.
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(grad, cur_level) && !isBatchedAtLevel(indices, cur_level) && !isBatchedAtLevel(offsets, cur_level) && !isBatchedAtLevel(offset2bag, cur_level) && !isBatchedAtLevel(bag_size, cur_level) && !isBatchedAtLevel(maximum_indices, cur_level) && !isBatchedAtLevel(per_sample_weights, cur_level)) {
    return at::_ops::_embedding_bag_backward::call(grad, indices, offsets, offset2bag, bag_size, maximum_indices, num_weights, scale_grad_by_freq, mode, sparse, per_sample_weights, padding_idx);
  }
  auto [grad_value, grad_bdim] = unwrapTensorAtLevel(grad, cur_level);
  auto [indices_value, indices_bdim] = unwrapTensorAtLevel(indices, cur_level);
  auto [offsets_value, offsets_bdim] = unwrapTensorAtLevel(offsets, cur_level);
  auto [offset2bag_value, offset2bag_bdim] = unwrapTensorAtLevel(offset2bag, cur_level);
  auto [bag_size_value, bag_size_bdim] = unwrapTensorAtLevel(bag_size, cur_level);
  auto [maximum_indices_value, maximum_indices_bdim] = unwrapTensorAtLevel(maximum_indices, cur_level);
  // Absent optional maps to (nullopt value, nullopt bdim).
  std::optional<Tensor> per_sample_weights_value;
  std::optional<int64_t> per_sample_weights_bdim;
  if (per_sample_weights) {
      std::tie(per_sample_weights_value, per_sample_weights_bdim) = unwrapTensorAtLevel(per_sample_weights.value(), cur_level);
  }
  auto results = batch_rule(grad_value, grad_bdim, indices_value, indices_bdim, offsets_value, offsets_bdim, offset2bag_value, offset2bag_bdim, bag_size_value, bag_size_bdim, maximum_indices_value, maximum_indices_bdim, num_weights, scale_grad_by_freq, mode, sparse, per_sample_weights_value, per_sample_weights_bdim, padding_idx);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// Generated vmap plumbing for aten::_embedding_bag_sparse_backward: unwraps
// the five required tensors plus the optional per_sample_weights (only when
// present), dispatches to batch_rule, and re-wraps the gradient result.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor _embedding_bag_sparse_backward_generated_plumbing(const at::Tensor & grad, const at::Tensor & indices, const at::Tensor & offsets, const at::Tensor & offset2bag, const at::Tensor & bag_size, c10::SymInt num_weights, bool scale_grad_by_freq, int64_t mode, const ::std::optional<at::Tensor> & per_sample_weights, int64_t padding_idx) {
  // Exclude FuncTorchBatched for the duration of this call.
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(grad, cur_level) && !isBatchedAtLevel(indices, cur_level) && !isBatchedAtLevel(offsets, cur_level) && !isBatchedAtLevel(offset2bag, cur_level) && !isBatchedAtLevel(bag_size, cur_level) && !isBatchedAtLevel(per_sample_weights, cur_level)) {
    return at::_ops::_embedding_bag_sparse_backward::call(grad, indices, offsets, offset2bag, bag_size, num_weights, scale_grad_by_freq, mode, per_sample_weights, padding_idx);
  }
  auto [grad_value, grad_bdim] = unwrapTensorAtLevel(grad, cur_level);
  auto [indices_value, indices_bdim] = unwrapTensorAtLevel(indices, cur_level);
  auto [offsets_value, offsets_bdim] = unwrapTensorAtLevel(offsets, cur_level);
  auto [offset2bag_value, offset2bag_bdim] = unwrapTensorAtLevel(offset2bag, cur_level);
  auto [bag_size_value, bag_size_bdim] = unwrapTensorAtLevel(bag_size, cur_level);
  // Absent optional maps to (nullopt value, nullopt bdim).
  std::optional<Tensor> per_sample_weights_value;
  std::optional<int64_t> per_sample_weights_bdim;
  if (per_sample_weights) {
      std::tie(per_sample_weights_value, per_sample_weights_bdim) = unwrapTensorAtLevel(per_sample_weights.value(), cur_level);
  }
  auto results = batch_rule(grad_value, grad_bdim, indices_value, indices_bdim, offsets_value, offsets_bdim, offset2bag_value, offset2bag_bdim, bag_size_value, bag_size_bdim, num_weights, scale_grad_by_freq, mode, per_sample_weights_value, per_sample_weights_bdim, padding_idx);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// Generated vmap plumbing for aten::_embedding_bag_dense_backward: unwraps
// the five required tensors plus the optional per_sample_weights (only when
// present), dispatches to batch_rule, and re-wraps the gradient result.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor _embedding_bag_dense_backward_generated_plumbing(const at::Tensor & grad, const at::Tensor & indices, const at::Tensor & offset2bag, const at::Tensor & bag_size, const at::Tensor & maximum_indices, c10::SymInt num_weights, bool scale_grad_by_freq, int64_t mode, const ::std::optional<at::Tensor> & per_sample_weights, int64_t padding_idx) {
  // Exclude FuncTorchBatched for the duration of this call.
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(grad, cur_level) && !isBatchedAtLevel(indices, cur_level) && !isBatchedAtLevel(offset2bag, cur_level) && !isBatchedAtLevel(bag_size, cur_level) && !isBatchedAtLevel(maximum_indices, cur_level) && !isBatchedAtLevel(per_sample_weights, cur_level)) {
    return at::_ops::_embedding_bag_dense_backward::call(grad, indices, offset2bag, bag_size, maximum_indices, num_weights, scale_grad_by_freq, mode, per_sample_weights, padding_idx);
  }
  auto [grad_value, grad_bdim] = unwrapTensorAtLevel(grad, cur_level);
  auto [indices_value, indices_bdim] = unwrapTensorAtLevel(indices, cur_level);
  auto [offset2bag_value, offset2bag_bdim] = unwrapTensorAtLevel(offset2bag, cur_level);
  auto [bag_size_value, bag_size_bdim] = unwrapTensorAtLevel(bag_size, cur_level);
  auto [maximum_indices_value, maximum_indices_bdim] = unwrapTensorAtLevel(maximum_indices, cur_level);
  // Absent optional maps to (nullopt value, nullopt bdim).
  std::optional<Tensor> per_sample_weights_value;
  std::optional<int64_t> per_sample_weights_bdim;
  if (per_sample_weights) {
      std::tie(per_sample_weights_value, per_sample_weights_bdim) = unwrapTensorAtLevel(per_sample_weights.value(), cur_level);
  }
  auto results = batch_rule(grad_value, grad_bdim, indices_value, indices_bdim, offset2bag_value, offset2bag_bdim, bag_size_value, bag_size_bdim, maximum_indices_value, maximum_indices_bdim, num_weights, scale_grad_by_freq, mode, per_sample_weights_value, per_sample_weights_bdim, padding_idx);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// Generated vmap plumbing for aten::_embedding_bag_per_sample_weights_backward:
// unwraps all five tensor inputs at the current vmap level, dispatches to
// batch_rule, and re-wraps the single result.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor _embedding_bag_per_sample_weights_backward_generated_plumbing(const at::Tensor & grad, const at::Tensor & weight, const at::Tensor & indices, const at::Tensor & offsets, const at::Tensor & offset2bag, int64_t mode, int64_t padding_idx) {
  // Exclude FuncTorchBatched for the duration of this call.
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(grad, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(indices, cur_level) && !isBatchedAtLevel(offsets, cur_level) && !isBatchedAtLevel(offset2bag, cur_level)) {
    return at::_ops::_embedding_bag_per_sample_weights_backward::call(grad, weight, indices, offsets, offset2bag, mode, padding_idx);
  }
  auto [grad_value, grad_bdim] = unwrapTensorAtLevel(grad, cur_level);
  auto [weight_value, weight_bdim] = unwrapTensorAtLevel(weight, cur_level);
  auto [indices_value, indices_bdim] = unwrapTensorAtLevel(indices, cur_level);
  auto [offsets_value, offsets_bdim] = unwrapTensorAtLevel(offsets, cur_level);
  auto [offset2bag_value, offset2bag_bdim] = unwrapTensorAtLevel(offset2bag, cur_level);
  auto results = batch_rule(grad_value, grad_bdim, weight_value, weight_bdim, indices_value, indices_bdim, offsets_value, offsets_bdim, offset2bag_value, offset2bag_bdim, mode, padding_idx);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// Generated vmap plumbing for aten::new_empty: only self can be batched; the
// TensorOptions-style arguments are passed through to batch_rule unchanged.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor new_empty_generated_plumbing(const at::Tensor & self, c10::SymIntArrayRef size, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory) {
  // Exclude FuncTorchBatched for the duration of this call.
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::new_empty::call(self, size, dtype, layout, device, pin_memory);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, size, dtype, layout, device, pin_memory);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// Generated vmap plumbing for aten::new_empty_strided: only self can be
// batched; size/stride and the TensorOptions-style arguments pass through.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor new_empty_strided_generated_plumbing(const at::Tensor & self, c10::SymIntArrayRef size, c10::SymIntArrayRef stride, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory) {
  // Exclude FuncTorchBatched for the duration of this call.
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::new_empty_strided::call(self, size, stride, dtype, layout, device, pin_memory);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, size, stride, dtype, layout, device, pin_memory);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// Generated vmap plumbing for aten::new_full: only self can be batched; the
// fill value and TensorOptions-style arguments pass through to batch_rule.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor new_full_generated_plumbing(const at::Tensor & self, c10::SymIntArrayRef size, const at::Scalar & fill_value, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory) {
  // Exclude FuncTorchBatched for the duration of this call.
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::new_full::call(self, size, fill_value, dtype, layout, device, pin_memory);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, size, fill_value, dtype, layout, device, pin_memory);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// Generated vmap plumbing for aten::new_zeros: only self can be batched; the
// TensorOptions-style arguments pass through to batch_rule unchanged.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor new_zeros_generated_plumbing(const at::Tensor & self, c10::SymIntArrayRef size, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory) {
  // Exclude FuncTorchBatched for the duration of this call.
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::new_zeros::call(self, size, dtype, layout, device, pin_memory);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, size, dtype, layout, device, pin_memory);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// Generated vmap plumbing for aten::new_ones: only self can be batched; the
// TensorOptions-style arguments pass through to batch_rule unchanged.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor new_ones_generated_plumbing(const at::Tensor & self, c10::SymIntArrayRef size, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory) {
  // Exclude FuncTorchBatched for the duration of this call.
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::new_ones::call(self, size, dtype, layout, device, pin_memory);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, size, dtype, layout, device, pin_memory);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// Generated vmap plumbing for aten::_empty_per_channel_affine_quantized:
// only scales and zero_points can be batched; size, axis, and the
// TensorOptions-style arguments pass through to batch_rule unchanged.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor _empty_per_channel_affine_quantized_generated_plumbing(c10::SymIntArrayRef size, const at::Tensor & scales, const at::Tensor & zero_points, int64_t axis, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory, ::std::optional<at::MemoryFormat> memory_format) {
  // Exclude FuncTorchBatched for the duration of this call.
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(scales, cur_level) && !isBatchedAtLevel(zero_points, cur_level)) {
    return at::_ops::_empty_per_channel_affine_quantized::call(size, scales, zero_points, axis, dtype, layout, device, pin_memory, memory_format);
  }
  auto [scales_value, scales_bdim] = unwrapTensorAtLevel(scales, cur_level);
  auto [zero_points_value, zero_points_bdim] = unwrapTensorAtLevel(zero_points, cur_level);
  auto results = batch_rule(size, scales_value, scales_bdim, zero_points_value, zero_points_bdim, axis, dtype, layout, device, pin_memory, memory_format);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// In-place vmap plumbing for aten::_resize_output_: batch_rule is invoked for
// its side effect on the unwrapped `self`; `self` itself is returned. Falls
// through to the plain in-place op when `self` is not batched at this level.
4842 template <typename batch_rule_t, batch_rule_t batch_rule>
4843 const at::Tensor & _resize_output__generated_plumbing(const at::Tensor & self, c10::SymIntArrayRef size, at::Device device) {
4844   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
4845   auto maybe_layer = maybeCurrentDynamicLayer();
4846   vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
4847   int64_t cur_level = maybe_layer->layerId();
4848   if (!isBatchedAtLevel(self, cur_level)) {
4849     return at::_ops::_resize_output_::call(self, size, device);
4850   }
4851   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
4852   batch_rule(self_value, self_bdim, size, device);
4853   return self;
4854 }
// vmap plumbing for aten::empty_quantized: unwraps `qtensor` at the current
// level, forwards to batch_rule, and re-wraps the (tensor, bdim) result;
// plain op call when `qtensor` is not batched here.
4855 template <typename batch_rule_t, batch_rule_t batch_rule>
4856 at::Tensor empty_quantized_generated_plumbing(at::IntArrayRef size, const at::Tensor & qtensor, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory, ::std::optional<at::MemoryFormat> memory_format) {
4857   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
4858   auto maybe_layer = maybeCurrentDynamicLayer();
4859   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
4860   int64_t cur_level = maybe_layer->layerId();
4861   if (!isBatchedAtLevel(qtensor, cur_level)) {
4862     return at::_ops::empty_quantized::call(size, qtensor, dtype, layout, device, pin_memory, memory_format);
4863   }
4864   auto [qtensor_value, qtensor_bdim] = unwrapTensorAtLevel(qtensor, cur_level);
4865   auto results = batch_rule(size, qtensor_value, qtensor_bdim, dtype, layout, device, pin_memory, memory_format);
4866   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
4867 }
// vmap plumbing for aten::empty_like: unwraps `self` at the current level,
// forwards to batch_rule, and re-wraps the result; plain op call when `self`
// is not batched at this level.
4868 template <typename batch_rule_t, batch_rule_t batch_rule>
4869 at::Tensor empty_like_generated_plumbing(const at::Tensor & self, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory, ::std::optional<at::MemoryFormat> memory_format) {
4870   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
4871   auto maybe_layer = maybeCurrentDynamicLayer();
4872   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
4873   int64_t cur_level = maybe_layer->layerId();
4874   if (!isBatchedAtLevel(self, cur_level)) {
4875     return at::_ops::empty_like::call(self, dtype, layout, device, pin_memory, memory_format);
4876   }
4877   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
4878   auto results = batch_rule(self_value, self_bdim, dtype, layout, device, pin_memory, memory_format);
4879   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
4880 }
// vmap plumbing for aten::erf: unwrap `self` at the current level, apply
// batch_rule, re-wrap; plain op call when `self` is not batched here.
4881 template <typename batch_rule_t, batch_rule_t batch_rule>
4882 at::Tensor erf_generated_plumbing(const at::Tensor & self) {
4883   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
4884   auto maybe_layer = maybeCurrentDynamicLayer();
4885   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
4886   int64_t cur_level = maybe_layer->layerId();
4887   if (!isBatchedAtLevel(self, cur_level)) {
4888     return at::_ops::erf::call(self);
4889   }
4890   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
4891   auto results = batch_rule(self_value, self_bdim);
4892   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
4893 }
// In-place vmap plumbing for aten::erf_: batch_rule mutates the unwrapped
// `self`; `self` is returned. Plain op call when not batched at this level.
4894 template <typename batch_rule_t, batch_rule_t batch_rule>
4895 at::Tensor & erf__generated_plumbing(at::Tensor & self) {
4896   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
4897   auto maybe_layer = maybeCurrentDynamicLayer();
4898   vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
4899   int64_t cur_level = maybe_layer->layerId();
4900   if (!isBatchedAtLevel(self, cur_level)) {
4901     return at::_ops::erf_::call(self);
4902   }
4903   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
4904   batch_rule(self_value, self_bdim);
4905   return self;
4906 }
// vmap plumbing for aten::erfc: unwrap `self` at the current level, apply
// batch_rule, re-wrap; plain op call when `self` is not batched here.
4907 template <typename batch_rule_t, batch_rule_t batch_rule>
4908 at::Tensor erfc_generated_plumbing(const at::Tensor & self) {
4909   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
4910   auto maybe_layer = maybeCurrentDynamicLayer();
4911   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
4912   int64_t cur_level = maybe_layer->layerId();
4913   if (!isBatchedAtLevel(self, cur_level)) {
4914     return at::_ops::erfc::call(self);
4915   }
4916   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
4917   auto results = batch_rule(self_value, self_bdim);
4918   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
4919 }
// In-place vmap plumbing for aten::erfc_: batch_rule mutates the unwrapped
// `self`; `self` is returned. Plain op call when not batched at this level.
4920 template <typename batch_rule_t, batch_rule_t batch_rule>
4921 at::Tensor & erfc__generated_plumbing(at::Tensor & self) {
4922   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
4923   auto maybe_layer = maybeCurrentDynamicLayer();
4924   vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
4925   int64_t cur_level = maybe_layer->layerId();
4926   if (!isBatchedAtLevel(self, cur_level)) {
4927     return at::_ops::erfc_::call(self);
4928   }
4929   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
4930   batch_rule(self_value, self_bdim);
4931   return self;
4932 }
// vmap plumbing for aten::exp: unwrap `self` at the current level, apply
// batch_rule, re-wrap; plain op call when `self` is not batched here.
4933 template <typename batch_rule_t, batch_rule_t batch_rule>
4934 at::Tensor exp_generated_plumbing(const at::Tensor & self) {
4935   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
4936   auto maybe_layer = maybeCurrentDynamicLayer();
4937   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
4938   int64_t cur_level = maybe_layer->layerId();
4939   if (!isBatchedAtLevel(self, cur_level)) {
4940     return at::_ops::exp::call(self);
4941   }
4942   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
4943   auto results = batch_rule(self_value, self_bdim);
4944   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
4945 }
// In-place vmap plumbing for aten::exp_: batch_rule mutates the unwrapped
// `self`; `self` is returned. Plain op call when not batched at this level.
4946 template <typename batch_rule_t, batch_rule_t batch_rule>
4947 at::Tensor & exp__generated_plumbing(at::Tensor & self) {
4948   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
4949   auto maybe_layer = maybeCurrentDynamicLayer();
4950   vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
4951   int64_t cur_level = maybe_layer->layerId();
4952   if (!isBatchedAtLevel(self, cur_level)) {
4953     return at::_ops::exp_::call(self);
4954   }
4955   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
4956   batch_rule(self_value, self_bdim);
4957   return self;
4958 }
// vmap plumbing for aten::exp2: unwrap `self` at the current level, apply
// batch_rule, re-wrap; plain op call when `self` is not batched here.
4959 template <typename batch_rule_t, batch_rule_t batch_rule>
4960 at::Tensor exp2_generated_plumbing(const at::Tensor & self) {
4961   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
4962   auto maybe_layer = maybeCurrentDynamicLayer();
4963   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
4964   int64_t cur_level = maybe_layer->layerId();
4965   if (!isBatchedAtLevel(self, cur_level)) {
4966     return at::_ops::exp2::call(self);
4967   }
4968   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
4969   auto results = batch_rule(self_value, self_bdim);
4970   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
4971 }
// In-place vmap plumbing for aten::exp2_: batch_rule mutates the unwrapped
// `self`; `self` is returned. Plain op call when not batched at this level.
4972 template <typename batch_rule_t, batch_rule_t batch_rule>
4973 at::Tensor & exp2__generated_plumbing(at::Tensor & self) {
4974   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
4975   auto maybe_layer = maybeCurrentDynamicLayer();
4976   vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
4977   int64_t cur_level = maybe_layer->layerId();
4978   if (!isBatchedAtLevel(self, cur_level)) {
4979     return at::_ops::exp2_::call(self);
4980   }
4981   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
4982   batch_rule(self_value, self_bdim);
4983   return self;
4984 }
// vmap plumbing for aten::expm1: unwrap `self` at the current level, apply
// batch_rule, re-wrap; plain op call when `self` is not batched here.
4985 template <typename batch_rule_t, batch_rule_t batch_rule>
4986 at::Tensor expm1_generated_plumbing(const at::Tensor & self) {
4987   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
4988   auto maybe_layer = maybeCurrentDynamicLayer();
4989   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
4990   int64_t cur_level = maybe_layer->layerId();
4991   if (!isBatchedAtLevel(self, cur_level)) {
4992     return at::_ops::expm1::call(self);
4993   }
4994   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
4995   auto results = batch_rule(self_value, self_bdim);
4996   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
4997 }
// In-place vmap plumbing for aten::expm1_: batch_rule mutates the unwrapped
// `self`; `self` is returned. Plain op call when not batched at this level.
4998 template <typename batch_rule_t, batch_rule_t batch_rule>
4999 at::Tensor & expm1__generated_plumbing(at::Tensor & self) {
5000   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
5001   auto maybe_layer = maybeCurrentDynamicLayer();
5002   vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
5003   int64_t cur_level = maybe_layer->layerId();
5004   if (!isBatchedAtLevel(self, cur_level)) {
5005     return at::_ops::expm1_::call(self);
5006   }
5007   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
5008   batch_rule(self_value, self_bdim);
5009   return self;
5010 }
// vmap plumbing for aten::expand: unwrap `self` at the current level, apply
// batch_rule with the remaining args, re-wrap; plain op call when `self` is
// not batched here.
5011 template <typename batch_rule_t, batch_rule_t batch_rule>
5012 at::Tensor expand_generated_plumbing(const at::Tensor & self, c10::SymIntArrayRef size, bool implicit) {
5013   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
5014   auto maybe_layer = maybeCurrentDynamicLayer();
5015   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
5016   int64_t cur_level = maybe_layer->layerId();
5017   if (!isBatchedAtLevel(self, cur_level)) {
5018     return at::_ops::expand::call(self, size, implicit);
5019   }
5020   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
5021   auto results = batch_rule(self_value, self_bdim, size, implicit);
5022   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
5023 }
// vmap plumbing for aten::expand_as: unwraps both `self` and `other` at the
// current level, forwards to batch_rule, and re-wraps; plain op call when
// neither tensor is batched here.
5024 template <typename batch_rule_t, batch_rule_t batch_rule>
5025 at::Tensor expand_as_generated_plumbing(const at::Tensor & self, const at::Tensor & other) {
5026   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
5027   auto maybe_layer = maybeCurrentDynamicLayer();
5028   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
5029   int64_t cur_level = maybe_layer->layerId();
5030   if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
5031     return at::_ops::expand_as::call(self, other);
5032   }
5033   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
5034   auto [other_value, other_bdim] = unwrapTensorAtLevel(other, cur_level);
5035   auto results = batch_rule(self_value, self_bdim, other_value, other_bdim);
5036   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
5037 }
// vmap plumbing for aten::flatten.using_ints: unwrap `self` at the current
// level, apply batch_rule, re-wrap; plain op call when not batched here.
5038 template <typename batch_rule_t, batch_rule_t batch_rule>
5039 at::Tensor flatten_using_ints_generated_plumbing(const at::Tensor & self, int64_t start_dim, int64_t end_dim) {
5040   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
5041   auto maybe_layer = maybeCurrentDynamicLayer();
5042   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
5043   int64_t cur_level = maybe_layer->layerId();
5044   if (!isBatchedAtLevel(self, cur_level)) {
5045     return at::_ops::flatten_using_ints::call(self, start_dim, end_dim);
5046   }
5047   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
5048   auto results = batch_rule(self_value, self_bdim, start_dim, end_dim);
5049   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
5050 }
// vmap plumbing for aten::flatten.named_out_dim: unwrap `self` at the current
// level, apply batch_rule, re-wrap; plain op call when not batched here.
5051 template <typename batch_rule_t, batch_rule_t batch_rule>
5052 at::Tensor flatten_named_out_dim_generated_plumbing(const at::Tensor & self, int64_t start_dim, int64_t end_dim, at::Dimname out_dim) {
5053   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
5054   auto maybe_layer = maybeCurrentDynamicLayer();
5055   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
5056   int64_t cur_level = maybe_layer->layerId();
5057   if (!isBatchedAtLevel(self, cur_level)) {
5058     return at::_ops::flatten_named_out_dim::call(self, start_dim, end_dim, out_dim);
5059   }
5060   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
5061   auto results = batch_rule(self_value, self_bdim, start_dim, end_dim, out_dim);
5062   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
5063 }
// vmap plumbing for aten::flatten.using_names: unwrap `self` at the current
// level, apply batch_rule, re-wrap; plain op call when not batched here.
5064 template <typename batch_rule_t, batch_rule_t batch_rule>
5065 at::Tensor flatten_using_names_generated_plumbing(const at::Tensor & self, at::Dimname start_dim, at::Dimname end_dim, at::Dimname out_dim) {
5066   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
5067   auto maybe_layer = maybeCurrentDynamicLayer();
5068   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
5069   int64_t cur_level = maybe_layer->layerId();
5070   if (!isBatchedAtLevel(self, cur_level)) {
5071     return at::_ops::flatten_using_names::call(self, start_dim, end_dim, out_dim);
5072   }
5073   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
5074   auto results = batch_rule(self_value, self_bdim, start_dim, end_dim, out_dim);
5075   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
5076 }
// vmap plumbing for aten::flatten.DimnameList: unwrap `self` at the current
// level, apply batch_rule, re-wrap; plain op call when not batched here.
5077 template <typename batch_rule_t, batch_rule_t batch_rule>
5078 at::Tensor flatten_DimnameList_generated_plumbing(const at::Tensor & self, at::DimnameList dims, at::Dimname out_dim) {
5079   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
5080   auto maybe_layer = maybeCurrentDynamicLayer();
5081   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
5082   int64_t cur_level = maybe_layer->layerId();
5083   if (!isBatchedAtLevel(self, cur_level)) {
5084     return at::_ops::flatten_DimnameList::call(self, dims, out_dim);
5085   }
5086   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
5087   auto results = batch_rule(self_value, self_bdim, dims, out_dim);
5088   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
5089 }
// vmap plumbing for aten::unflatten.int: unwrap `self` at the current level,
// apply batch_rule, re-wrap; plain op call when not batched here.
5090 template <typename batch_rule_t, batch_rule_t batch_rule>
5091 at::Tensor unflatten_int_generated_plumbing(const at::Tensor & self, int64_t dim, c10::SymIntArrayRef sizes) {
5092   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
5093   auto maybe_layer = maybeCurrentDynamicLayer();
5094   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
5095   int64_t cur_level = maybe_layer->layerId();
5096   if (!isBatchedAtLevel(self, cur_level)) {
5097     return at::_ops::unflatten_int::call(self, dim, sizes);
5098   }
5099   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
5100   auto results = batch_rule(self_value, self_bdim, dim, sizes);
5101   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
5102 }
// vmap plumbing for aten::unflatten.Dimname: unwrap `self` at the current
// level, apply batch_rule, re-wrap; plain op call when not batched here.
5103 template <typename batch_rule_t, batch_rule_t batch_rule>
5104 at::Tensor unflatten_Dimname_generated_plumbing(const at::Tensor & self, at::Dimname dim, c10::SymIntArrayRef sizes, at::DimnameList names) {
5105   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
5106   auto maybe_layer = maybeCurrentDynamicLayer();
5107   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
5108   int64_t cur_level = maybe_layer->layerId();
5109   if (!isBatchedAtLevel(self, cur_level)) {
5110     return at::_ops::unflatten_Dimname::call(self, dim, sizes, names);
5111   }
5112   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
5113   auto results = batch_rule(self_value, self_bdim, dim, sizes, names);
5114   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
5115 }
// vmap plumbing for aten::fill.Scalar (functional variant): unwrap `self` at
// the current level, apply batch_rule, re-wrap; plain op call when not
// batched here.
5116 template <typename batch_rule_t, batch_rule_t batch_rule>
5117 at::Tensor fill_Scalar_generated_plumbing(const at::Tensor & self, const at::Scalar & value) {
5118   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
5119   auto maybe_layer = maybeCurrentDynamicLayer();
5120   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
5121   int64_t cur_level = maybe_layer->layerId();
5122   if (!isBatchedAtLevel(self, cur_level)) {
5123     return at::_ops::fill_Scalar::call(self, value);
5124   }
5125   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
5126   auto results = batch_rule(self_value, self_bdim, value);
5127   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
5128 }
// vmap plumbing for aten::fill.Tensor (functional variant): unwraps both
// `self` and `value`, forwards to batch_rule, and re-wraps; plain op call
// when neither tensor is batched here.
5129 template <typename batch_rule_t, batch_rule_t batch_rule>
5130 at::Tensor fill_Tensor_generated_plumbing(const at::Tensor & self, const at::Tensor & value) {
5131   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
5132   auto maybe_layer = maybeCurrentDynamicLayer();
5133   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
5134   int64_t cur_level = maybe_layer->layerId();
5135   if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(value, cur_level)) {
5136     return at::_ops::fill_Tensor::call(self, value);
5137   }
5138   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
5139   auto [value_value, value_bdim] = unwrapTensorAtLevel(value, cur_level);
5140   auto results = batch_rule(self_value, self_bdim, value_value, value_bdim);
5141   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
5142 }
// In-place vmap plumbing for aten::fill_.Scalar: batch_rule mutates the
// unwrapped `self`; `self` is returned. Plain op call when not batched here.
5143 template <typename batch_rule_t, batch_rule_t batch_rule>
5144 at::Tensor & fill__Scalar_generated_plumbing(at::Tensor & self, const at::Scalar & value) {
5145   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
5146   auto maybe_layer = maybeCurrentDynamicLayer();
5147   vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
5148   int64_t cur_level = maybe_layer->layerId();
5149   if (!isBatchedAtLevel(self, cur_level)) {
5150     return at::_ops::fill__Scalar::call(self, value);
5151   }
5152   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
5153   batch_rule(self_value, self_bdim, value);
5154   return self;
5155 }
// In-place vmap plumbing for aten::fill_.Tensor: unwraps both `self` and
// `value`; batch_rule mutates the unwrapped `self`, and `self` is returned.
// Plain op call when neither tensor is batched at this level.
5156 template <typename batch_rule_t, batch_rule_t batch_rule>
5157 at::Tensor & fill__Tensor_generated_plumbing(at::Tensor & self, const at::Tensor & value) {
5158   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
5159   auto maybe_layer = maybeCurrentDynamicLayer();
5160   vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
5161   int64_t cur_level = maybe_layer->layerId();
5162   if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(value, cur_level)) {
5163     return at::_ops::fill__Tensor::call(self, value);
5164   }
5165   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
5166   auto [value_value, value_bdim] = unwrapTensorAtLevel(value, cur_level);
5167   batch_rule(self_value, self_bdim, value_value, value_bdim);
5168   return self;
5169 }
// vmap plumbing for aten::floor: unwrap `self` at the current level, apply
// batch_rule, re-wrap; plain op call when `self` is not batched here.
5170 template <typename batch_rule_t, batch_rule_t batch_rule>
5171 at::Tensor floor_generated_plumbing(const at::Tensor & self) {
5172   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
5173   auto maybe_layer = maybeCurrentDynamicLayer();
5174   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
5175   int64_t cur_level = maybe_layer->layerId();
5176   if (!isBatchedAtLevel(self, cur_level)) {
5177     return at::_ops::floor::call(self);
5178   }
5179   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
5180   auto results = batch_rule(self_value, self_bdim);
5181   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
5182 }
// In-place vmap plumbing for aten::floor_: batch_rule mutates the unwrapped
// `self`; `self` is returned. Plain op call when not batched at this level.
5183 template <typename batch_rule_t, batch_rule_t batch_rule>
5184 at::Tensor & floor__generated_plumbing(at::Tensor & self) {
5185   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
5186   auto maybe_layer = maybeCurrentDynamicLayer();
5187   vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
5188   int64_t cur_level = maybe_layer->layerId();
5189   if (!isBatchedAtLevel(self, cur_level)) {
5190     return at::_ops::floor_::call(self);
5191   }
5192   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
5193   batch_rule(self_value, self_bdim);
5194   return self;
5195 }
// vmap plumbing for aten::floor_divide: unwraps both `self` and `other` at
// the current level, forwards to batch_rule, and re-wraps; plain op call when
// neither tensor is batched here.
5196 template <typename batch_rule_t, batch_rule_t batch_rule>
5197 at::Tensor floor_divide_generated_plumbing(const at::Tensor & self, const at::Tensor & other) {
5198   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
5199   auto maybe_layer = maybeCurrentDynamicLayer();
5200   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
5201   int64_t cur_level = maybe_layer->layerId();
5202   if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
5203     return at::_ops::floor_divide::call(self, other);
5204   }
5205   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
5206   auto [other_value, other_bdim] = unwrapTensorAtLevel(other, cur_level);
5207   auto results = batch_rule(self_value, self_bdim, other_value, other_bdim);
5208   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
5209 }
// In-place vmap plumbing for aten::floor_divide_.Tensor: unwraps both tensors;
// batch_rule mutates the unwrapped `self`, and `self` is returned. Plain op
// call when neither tensor is batched at this level.
5210 template <typename batch_rule_t, batch_rule_t batch_rule>
5211 at::Tensor & floor_divide__Tensor_generated_plumbing(at::Tensor & self, const at::Tensor & other) {
5212   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
5213   auto maybe_layer = maybeCurrentDynamicLayer();
5214   vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
5215   int64_t cur_level = maybe_layer->layerId();
5216   if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
5217     return at::_ops::floor_divide__Tensor::call(self, other);
5218   }
5219   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
5220   auto [other_value, other_bdim] = unwrapTensorAtLevel(other, cur_level);
5221   batch_rule(self_value, self_bdim, other_value, other_bdim);
5222   return self;
5223 }
// vmap plumbing for aten::floor_divide.Scalar: unwrap `self` at the current
// level, apply batch_rule, re-wrap; plain op call when not batched here.
5224 template <typename batch_rule_t, batch_rule_t batch_rule>
5225 at::Tensor floor_divide_Scalar_generated_plumbing(const at::Tensor & self, const at::Scalar & other) {
5226   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
5227   auto maybe_layer = maybeCurrentDynamicLayer();
5228   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
5229   int64_t cur_level = maybe_layer->layerId();
5230   if (!isBatchedAtLevel(self, cur_level)) {
5231     return at::_ops::floor_divide_Scalar::call(self, other);
5232   }
5233   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
5234   auto results = batch_rule(self_value, self_bdim, other);
5235   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
5236 }
// In-place vmap plumbing for aten::floor_divide_.Scalar: batch_rule mutates
// the unwrapped `self`; `self` is returned. Plain op call when not batched.
5237 template <typename batch_rule_t, batch_rule_t batch_rule>
5238 at::Tensor & floor_divide__Scalar_generated_plumbing(at::Tensor & self, const at::Scalar & other) {
5239   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
5240   auto maybe_layer = maybeCurrentDynamicLayer();
5241   vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
5242   int64_t cur_level = maybe_layer->layerId();
5243   if (!isBatchedAtLevel(self, cur_level)) {
5244     return at::_ops::floor_divide__Scalar::call(self, other);
5245   }
5246   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
5247   batch_rule(self_value, self_bdim, other);
5248   return self;
5249 }
// vmap plumbing for aten::frac: unwrap `self` at the current level, apply
// batch_rule, re-wrap; plain op call when `self` is not batched here.
5250 template <typename batch_rule_t, batch_rule_t batch_rule>
5251 at::Tensor frac_generated_plumbing(const at::Tensor & self) {
5252   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
5253   auto maybe_layer = maybeCurrentDynamicLayer();
5254   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
5255   int64_t cur_level = maybe_layer->layerId();
5256   if (!isBatchedAtLevel(self, cur_level)) {
5257     return at::_ops::frac::call(self);
5258   }
5259   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
5260   auto results = batch_rule(self_value, self_bdim);
5261   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
5262 }
// In-place vmap plumbing for aten::frac_: batch_rule mutates the unwrapped
// `self`; `self` is returned. Plain op call when not batched at this level.
5263 template <typename batch_rule_t, batch_rule_t batch_rule>
5264 at::Tensor & frac__generated_plumbing(at::Tensor & self) {
5265   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
5266   auto maybe_layer = maybeCurrentDynamicLayer();
5267   vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
5268   int64_t cur_level = maybe_layer->layerId();
5269   if (!isBatchedAtLevel(self, cur_level)) {
5270     return at::_ops::frac_::call(self);
5271   }
5272   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
5273   batch_rule(self_value, self_bdim);
5274   return self;
5275 }
// vmap plumbing for aten::full_like: unwrap `self` at the current level,
// apply batch_rule, re-wrap; plain op call when `self` is not batched here.
5276 template <typename batch_rule_t, batch_rule_t batch_rule>
5277 at::Tensor full_like_generated_plumbing(const at::Tensor & self, const at::Scalar & fill_value, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory, ::std::optional<at::MemoryFormat> memory_format) {
5278   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
5279   auto maybe_layer = maybeCurrentDynamicLayer();
5280   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
5281   int64_t cur_level = maybe_layer->layerId();
5282   if (!isBatchedAtLevel(self, cur_level)) {
5283     return at::_ops::full_like::call(self, fill_value, dtype, layout, device, pin_memory, memory_format);
5284   }
5285   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
5286   auto results = batch_rule(self_value, self_bdim, fill_value, dtype, layout, device, pin_memory, memory_format);
5287   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
5288 }
// vmap plumbing for aten::gcd: unwraps both `self` and `other` at the current
// level, forwards to batch_rule, and re-wraps; plain op call when neither
// tensor is batched here.
5289 template <typename batch_rule_t, batch_rule_t batch_rule>
5290 at::Tensor gcd_generated_plumbing(const at::Tensor & self, const at::Tensor & other) {
5291   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
5292   auto maybe_layer = maybeCurrentDynamicLayer();
5293   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
5294   int64_t cur_level = maybe_layer->layerId();
5295   if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
5296     return at::_ops::gcd::call(self, other);
5297   }
5298   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
5299   auto [other_value, other_bdim] = unwrapTensorAtLevel(other, cur_level);
5300   auto results = batch_rule(self_value, self_bdim, other_value, other_bdim);
5301   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
5302 }
// In-place vmap plumbing for aten::gcd_: unwraps both tensors; batch_rule
// mutates the unwrapped `self`, and `self` is returned. Plain op call when
// neither tensor is batched at this level.
5303 template <typename batch_rule_t, batch_rule_t batch_rule>
5304 at::Tensor & gcd__generated_plumbing(at::Tensor & self, const at::Tensor & other) {
5305   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
5306   auto maybe_layer = maybeCurrentDynamicLayer();
5307   vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
5308   int64_t cur_level = maybe_layer->layerId();
5309   if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
5310     return at::_ops::gcd_::call(self, other);
5311   }
5312   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
5313   auto [other_value, other_bdim] = unwrapTensorAtLevel(other, cur_level);
5314   batch_rule(self_value, self_bdim, other_value, other_bdim);
5315   return self;
5316 }
// vmap plumbing for aten::lcm: unwraps both `self` and `other` at the current
// level, forwards to batch_rule, and re-wraps; plain op call when neither
// tensor is batched here.
5317 template <typename batch_rule_t, batch_rule_t batch_rule>
5318 at::Tensor lcm_generated_plumbing(const at::Tensor & self, const at::Tensor & other) {
5319   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
5320   auto maybe_layer = maybeCurrentDynamicLayer();
5321   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
5322   int64_t cur_level = maybe_layer->layerId();
5323   if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
5324     return at::_ops::lcm::call(self, other);
5325   }
5326   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
5327   auto [other_value, other_bdim] = unwrapTensorAtLevel(other, cur_level);
5328   auto results = batch_rule(self_value, self_bdim, other_value, other_bdim);
5329   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
5330 }
// In-place vmap plumbing for aten::lcm_: unwraps both tensors; batch_rule
// mutates the unwrapped `self`, and `self` is returned. Plain op call when
// neither tensor is batched at this level.
5331 template <typename batch_rule_t, batch_rule_t batch_rule>
5332 at::Tensor & lcm__generated_plumbing(at::Tensor & self, const at::Tensor & other) {
5333   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
5334   auto maybe_layer = maybeCurrentDynamicLayer();
5335   vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
5336   int64_t cur_level = maybe_layer->layerId();
5337   if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
5338     return at::_ops::lcm_::call(self, other);
5339   }
5340   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
5341   auto [other_value, other_bdim] = unwrapTensorAtLevel(other, cur_level);
5342   batch_rule(self_value, self_bdim, other_value, other_bdim);
5343   return self;
5344 }
// vmap plumbing for aten::grid_sampler: unwraps `input` and `grid` at the
// current level, forwards to batch_rule, and re-wraps; plain op call when
// neither tensor is batched here.
5345 template <typename batch_rule_t, batch_rule_t batch_rule>
5346 at::Tensor grid_sampler_generated_plumbing(const at::Tensor & input, const at::Tensor & grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners) {
5347   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
5348   auto maybe_layer = maybeCurrentDynamicLayer();
5349   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
5350   int64_t cur_level = maybe_layer->layerId();
5351   if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(grid, cur_level)) {
5352     return at::_ops::grid_sampler::call(input, grid, interpolation_mode, padding_mode, align_corners);
5353   }
5354   auto [input_value, input_bdim] = unwrapTensorAtLevel(input, cur_level);
5355   auto [grid_value, grid_bdim] = unwrapTensorAtLevel(grid, cur_level);
5356   auto results = batch_rule(input_value, input_bdim, grid_value, grid_bdim, interpolation_mode, padding_mode, align_corners);
5357   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
5358 }
// vmap plumbing for aten::grid_sampler_2d: unwraps `input` and `grid` at the
// current level, forwards to batch_rule, and re-wraps; plain op call when
// neither tensor is batched here.
5359 template <typename batch_rule_t, batch_rule_t batch_rule>
5360 at::Tensor grid_sampler_2d_generated_plumbing(const at::Tensor & input, const at::Tensor & grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners) {
5361   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
5362   auto maybe_layer = maybeCurrentDynamicLayer();
5363   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
5364   int64_t cur_level = maybe_layer->layerId();
5365   if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(grid, cur_level)) {
5366     return at::_ops::grid_sampler_2d::call(input, grid, interpolation_mode, padding_mode, align_corners);
5367   }
5368   auto [input_value, input_bdim] = unwrapTensorAtLevel(input, cur_level);
5369   auto [grid_value, grid_bdim] = unwrapTensorAtLevel(grid, cur_level);
5370   auto results = batch_rule(input_value, input_bdim, grid_value, grid_bdim, interpolation_mode, padding_mode, align_corners);
5371   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
5372 }
5373 template <typename batch_rule_t, batch_rule_t batch_rule>
5374 ::std::tuple<at::Tensor,at::Tensor> grid_sampler_2d_backward_generated_plumbing(const at::Tensor & grad_output, const at::Tensor & input, const at::Tensor & grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners, ::std::array<bool,2> output_mask) {
5375   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
5376   auto maybe_layer = maybeCurrentDynamicLayer();
5377   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
5378   int64_t cur_level = maybe_layer->layerId();
5379   if (!isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(grid, cur_level)) {
5380     return at::_ops::grid_sampler_2d_backward::call(grad_output, input, grid, interpolation_mode, padding_mode, align_corners, output_mask);
5381   }
5382   auto [grad_output_value, grad_output_bdim] = unwrapTensorAtLevel(grad_output, cur_level);
5383   auto [input_value, input_bdim] = unwrapTensorAtLevel(input, cur_level);
5384   auto [grid_value, grid_bdim] = unwrapTensorAtLevel(grid, cur_level);
5385   auto results = batch_rule(grad_output_value, grad_output_bdim, input_value, input_bdim, grid_value, grid_bdim, interpolation_mode, padding_mode, align_corners, output_mask);
5386   return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
5387 }
5388 template <typename batch_rule_t, batch_rule_t batch_rule>
5389 at::Tensor _grid_sampler_2d_cpu_fallback_generated_plumbing(const at::Tensor & input, const at::Tensor & grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners) {
5390   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
5391   auto maybe_layer = maybeCurrentDynamicLayer();
5392   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
5393   int64_t cur_level = maybe_layer->layerId();
5394   if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(grid, cur_level)) {
5395     return at::_ops::_grid_sampler_2d_cpu_fallback::call(input, grid, interpolation_mode, padding_mode, align_corners);
5396   }
5397   auto [input_value, input_bdim] = unwrapTensorAtLevel(input, cur_level);
5398   auto [grid_value, grid_bdim] = unwrapTensorAtLevel(grid, cur_level);
5399   auto results = batch_rule(input_value, input_bdim, grid_value, grid_bdim, interpolation_mode, padding_mode, align_corners);
5400   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
5401 }
5402 template <typename batch_rule_t, batch_rule_t batch_rule>
5403 ::std::tuple<at::Tensor,at::Tensor> _grid_sampler_2d_cpu_fallback_backward_generated_plumbing(const at::Tensor & grad_output, const at::Tensor & input, const at::Tensor & grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners) {
5404   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
5405   auto maybe_layer = maybeCurrentDynamicLayer();
5406   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
5407   int64_t cur_level = maybe_layer->layerId();
5408   if (!isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(grid, cur_level)) {
5409     return at::_ops::_grid_sampler_2d_cpu_fallback_backward::call(grad_output, input, grid, interpolation_mode, padding_mode, align_corners);
5410   }
5411   auto [grad_output_value, grad_output_bdim] = unwrapTensorAtLevel(grad_output, cur_level);
5412   auto [input_value, input_bdim] = unwrapTensorAtLevel(input, cur_level);
5413   auto [grid_value, grid_bdim] = unwrapTensorAtLevel(grid, cur_level);
5414   auto results = batch_rule(grad_output_value, grad_output_bdim, input_value, input_bdim, grid_value, grid_bdim, interpolation_mode, padding_mode, align_corners);
5415   return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
5416 }
5417 template <typename batch_rule_t, batch_rule_t batch_rule>
5418 at::Tensor grid_sampler_3d_generated_plumbing(const at::Tensor & input, const at::Tensor & grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners) {
5419   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
5420   auto maybe_layer = maybeCurrentDynamicLayer();
5421   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
5422   int64_t cur_level = maybe_layer->layerId();
5423   if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(grid, cur_level)) {
5424     return at::_ops::grid_sampler_3d::call(input, grid, interpolation_mode, padding_mode, align_corners);
5425   }
5426   auto [input_value, input_bdim] = unwrapTensorAtLevel(input, cur_level);
5427   auto [grid_value, grid_bdim] = unwrapTensorAtLevel(grid, cur_level);
5428   auto results = batch_rule(input_value, input_bdim, grid_value, grid_bdim, interpolation_mode, padding_mode, align_corners);
5429   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
5430 }
5431 template <typename batch_rule_t, batch_rule_t batch_rule>
5432 ::std::tuple<at::Tensor,at::Tensor> grid_sampler_3d_backward_generated_plumbing(const at::Tensor & grad_output, const at::Tensor & input, const at::Tensor & grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners, ::std::array<bool,2> output_mask) {
5433   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
5434   auto maybe_layer = maybeCurrentDynamicLayer();
5435   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
5436   int64_t cur_level = maybe_layer->layerId();
5437   if (!isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(grid, cur_level)) {
5438     return at::_ops::grid_sampler_3d_backward::call(grad_output, input, grid, interpolation_mode, padding_mode, align_corners, output_mask);
5439   }
5440   auto [grad_output_value, grad_output_bdim] = unwrapTensorAtLevel(grad_output, cur_level);
5441   auto [input_value, input_bdim] = unwrapTensorAtLevel(input, cur_level);
5442   auto [grid_value, grid_bdim] = unwrapTensorAtLevel(grid, cur_level);
5443   auto results = batch_rule(grad_output_value, grad_output_bdim, input_value, input_bdim, grid_value, grid_bdim, interpolation_mode, padding_mode, align_corners, output_mask);
5444   return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
5445 }
5446 template <typename batch_rule_t, batch_rule_t batch_rule>
5447 at::Tensor hinge_embedding_loss_generated_plumbing(const at::Tensor & self, const at::Tensor & target, double margin, int64_t reduction) {
5448   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
5449   auto maybe_layer = maybeCurrentDynamicLayer();
5450   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
5451   int64_t cur_level = maybe_layer->layerId();
5452   if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(target, cur_level)) {
5453     return at::_ops::hinge_embedding_loss::call(self, target, margin, reduction);
5454   }
5455   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
5456   auto [target_value, target_bdim] = unwrapTensorAtLevel(target, cur_level);
5457   auto results = batch_rule(self_value, self_bdim, target_value, target_bdim, margin, reduction);
5458   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
5459 }
5460 template <typename batch_rule_t, batch_rule_t batch_rule>
5461 at::Tensor group_norm_generated_plumbing(const at::Tensor & input, int64_t num_groups, const ::std::optional<at::Tensor> & weight, const ::std::optional<at::Tensor> & bias, double eps, bool cudnn_enabled) {
5462   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
5463   auto maybe_layer = maybeCurrentDynamicLayer();
5464   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
5465   int64_t cur_level = maybe_layer->layerId();
5466   if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(bias, cur_level)) {
5467     return at::_ops::group_norm::call(input, num_groups, weight, bias, eps, cudnn_enabled);
5468   }
5469   auto [input_value, input_bdim] = unwrapTensorAtLevel(input, cur_level);
5470   std::optional<Tensor> weight_value;
5471   std::optional<int64_t> weight_bdim;
5472   if (weight) {
5473       std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight.value(), cur_level);
5474   }
5475   std::optional<Tensor> bias_value;
5476   std::optional<int64_t> bias_bdim;
5477   if (bias) {
5478       std::tie(bias_value, bias_bdim) = unwrapTensorAtLevel(bias.value(), cur_level);
5479   }
5480   auto results = batch_rule(input_value, input_bdim, num_groups, weight_value, weight_bdim, bias_value, bias_bdim, eps, cudnn_enabled);
5481   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
5482 }
5483 template <typename batch_rule_t, batch_rule_t batch_rule>
5484 ::std::tuple<at::Tensor,at::Tensor,at::Tensor> native_group_norm_generated_plumbing(const at::Tensor & input, const ::std::optional<at::Tensor> & weight, const ::std::optional<at::Tensor> & bias, c10::SymInt N, c10::SymInt C, c10::SymInt HxW, int64_t group, double eps) {
5485   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
5486   auto maybe_layer = maybeCurrentDynamicLayer();
5487   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
5488   int64_t cur_level = maybe_layer->layerId();
5489   if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(bias, cur_level)) {
5490     return at::_ops::native_group_norm::call(input, weight, bias, N, C, HxW, group, eps);
5491   }
5492   auto [input_value, input_bdim] = unwrapTensorAtLevel(input, cur_level);
5493   std::optional<Tensor> weight_value;
5494   std::optional<int64_t> weight_bdim;
5495   if (weight) {
5496       std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight.value(), cur_level);
5497   }
5498   std::optional<Tensor> bias_value;
5499   std::optional<int64_t> bias_bdim;
5500   if (bias) {
5501       std::tie(bias_value, bias_bdim) = unwrapTensorAtLevel(bias.value(), cur_level);
5502   }
5503   auto results = batch_rule(input_value, input_bdim, weight_value, weight_bdim, bias_value, bias_bdim, N, C, HxW, group, eps);
5504   return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level));
5505 }
5506 template <typename batch_rule_t, batch_rule_t batch_rule>
5507 ::std::tuple<at::Tensor,at::Tensor,at::Tensor> native_group_norm_backward_generated_plumbing(const at::Tensor & grad_out, const at::Tensor & input, const at::Tensor & mean, const at::Tensor & rstd, const ::std::optional<at::Tensor> & weight, c10::SymInt N, c10::SymInt C, c10::SymInt HxW, int64_t group, ::std::array<bool,3> output_mask) {
5508   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
5509   auto maybe_layer = maybeCurrentDynamicLayer();
5510   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
5511   int64_t cur_level = maybe_layer->layerId();
5512   if (!isBatchedAtLevel(grad_out, cur_level) && !isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(mean, cur_level) && !isBatchedAtLevel(rstd, cur_level) && !isBatchedAtLevel(weight, cur_level)) {
5513     return at::_ops::native_group_norm_backward::call(grad_out, input, mean, rstd, weight, N, C, HxW, group, output_mask);
5514   }
5515   auto [grad_out_value, grad_out_bdim] = unwrapTensorAtLevel(grad_out, cur_level);
5516   auto [input_value, input_bdim] = unwrapTensorAtLevel(input, cur_level);
5517   auto [mean_value, mean_bdim] = unwrapTensorAtLevel(mean, cur_level);
5518   auto [rstd_value, rstd_bdim] = unwrapTensorAtLevel(rstd, cur_level);
5519   std::optional<Tensor> weight_value;
5520   std::optional<int64_t> weight_bdim;
5521   if (weight) {
5522       std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight.value(), cur_level);
5523   }
5524   auto results = batch_rule(grad_out_value, grad_out_bdim, input_value, input_bdim, mean_value, mean_bdim, rstd_value, rstd_bdim, weight_value, weight_bdim, N, C, HxW, group, output_mask);
5525   return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level));
5526 }
5527 template <typename batch_rule_t, batch_rule_t batch_rule>
5528 at::Tensor _fft_r2c_generated_plumbing(const at::Tensor & self, at::IntArrayRef dim, int64_t normalization, bool onesided) {
5529   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
5530   auto maybe_layer = maybeCurrentDynamicLayer();
5531   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
5532   int64_t cur_level = maybe_layer->layerId();
5533   if (!isBatchedAtLevel(self, cur_level)) {
5534     return at::_ops::_fft_r2c::call(self, dim, normalization, onesided);
5535   }
5536   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
5537   auto results = batch_rule(self_value, self_bdim, dim, normalization, onesided);
5538   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
5539 }
5540 template <typename batch_rule_t, batch_rule_t batch_rule>
5541 at::Tensor _fft_c2r_generated_plumbing(const at::Tensor & self, at::IntArrayRef dim, int64_t normalization, c10::SymInt last_dim_size) {
5542   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
5543   auto maybe_layer = maybeCurrentDynamicLayer();
5544   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
5545   int64_t cur_level = maybe_layer->layerId();
5546   if (!isBatchedAtLevel(self, cur_level)) {
5547     return at::_ops::_fft_c2r::call(self, dim, normalization, last_dim_size);
5548   }
5549   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
5550   auto results = batch_rule(self_value, self_bdim, dim, normalization, last_dim_size);
5551   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
5552 }
5553 template <typename batch_rule_t, batch_rule_t batch_rule>
5554 at::Tensor _fft_c2c_generated_plumbing(const at::Tensor & self, c10::SymIntArrayRef dim, int64_t normalization, bool forward) {
5555   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
5556   auto maybe_layer = maybeCurrentDynamicLayer();
5557   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
5558   int64_t cur_level = maybe_layer->layerId();
5559   if (!isBatchedAtLevel(self, cur_level)) {
5560     return at::_ops::_fft_c2c::call(self, dim, normalization, forward);
5561   }
5562   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
5563   auto results = batch_rule(self_value, self_bdim, dim, normalization, forward);
5564   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
5565 }
5566 template <typename batch_rule_t, batch_rule_t batch_rule>
5567 void _validate_compressed_sparse_indices_generated_plumbing(bool is_crow, const at::Tensor & compressed_idx, const at::Tensor & plain_idx, int64_t cdim, int64_t dim, int64_t nnz) {
5568   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
5569   auto maybe_layer = maybeCurrentDynamicLayer();
5570   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
5571   int64_t cur_level = maybe_layer->layerId();
5572   if (!isBatchedAtLevel(compressed_idx, cur_level) && !isBatchedAtLevel(plain_idx, cur_level)) {
5573     return at::_ops::_validate_compressed_sparse_indices::call(is_crow, compressed_idx, plain_idx, cdim, dim, nnz);
5574   }
5575   auto [compressed_idx_value, compressed_idx_bdim] = unwrapTensorAtLevel(compressed_idx, cur_level);
5576   auto [plain_idx_value, plain_idx_bdim] = unwrapTensorAtLevel(plain_idx, cur_level);
5577   batch_rule(is_crow, compressed_idx_value, compressed_idx_bdim, plain_idx_value, plain_idx_bdim, cdim, dim, nnz);
5578 }
5579 template <typename batch_rule_t, batch_rule_t batch_rule>
5580 at::Tensor index_Tensor_generated_plumbing(const at::Tensor & self, const c10::List<::std::optional<at::Tensor>> & indices) {
5581   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
5582   auto maybe_layer = maybeCurrentDynamicLayer();
5583   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
5584   int64_t cur_level = maybe_layer->layerId();
5585   if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(indices, cur_level)) {
5586     return at::_ops::index_Tensor::call(self, indices);
5587   }
5588   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
5589   auto results = batch_rule(self_value, self_bdim, indices);
5590   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
5591 }
5592 template <typename batch_rule_t, batch_rule_t batch_rule>
5593 at::Tensor _unsafe_index_Tensor_generated_plumbing(const at::Tensor & self, const c10::List<::std::optional<at::Tensor>> & indices) {
5594   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
5595   auto maybe_layer = maybeCurrentDynamicLayer();
5596   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
5597   int64_t cur_level = maybe_layer->layerId();
5598   if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(indices, cur_level)) {
5599     return at::_ops::_unsafe_index_Tensor::call(self, indices);
5600   }
5601   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
5602   auto results = batch_rule(self_value, self_bdim, indices);
5603   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
5604 }
5605 template <typename batch_rule_t, batch_rule_t batch_rule>
5606 at::Tensor _unsafe_masked_index_generated_plumbing(const at::Tensor & self, const at::Tensor & mask, const c10::List<::std::optional<at::Tensor>> & indices, const at::Scalar & fill) {
5607   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
5608   auto maybe_layer = maybeCurrentDynamicLayer();
5609   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
5610   int64_t cur_level = maybe_layer->layerId();
5611   if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(mask, cur_level) && !isBatchedAtLevel(indices, cur_level)) {
5612     return at::_ops::_unsafe_masked_index::call(self, mask, indices, fill);
5613   }
5614   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
5615   auto [mask_value, mask_bdim] = unwrapTensorAtLevel(mask, cur_level);
5616   auto results = batch_rule(self_value, self_bdim, mask_value, mask_bdim, indices, fill);
5617   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
5618 }
5619 template <typename batch_rule_t, batch_rule_t batch_rule>
5620 at::Tensor _unsafe_masked_index_put_accumulate_generated_plumbing(const at::Tensor & self, const at::Tensor & mask, const c10::List<::std::optional<at::Tensor>> & indices, const at::Tensor & values) {
5621   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
5622   auto maybe_layer = maybeCurrentDynamicLayer();
5623   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
5624   int64_t cur_level = maybe_layer->layerId();
5625   if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(mask, cur_level) && !isBatchedAtLevel(indices, cur_level) && !isBatchedAtLevel(values, cur_level)) {
5626     return at::_ops::_unsafe_masked_index_put_accumulate::call(self, mask, indices, values);
5627   }
5628   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
5629   auto [mask_value, mask_bdim] = unwrapTensorAtLevel(mask, cur_level);
5630   auto [values_value, values_bdim] = unwrapTensorAtLevel(values, cur_level);
5631   auto results = batch_rule(self_value, self_bdim, mask_value, mask_bdim, indices, values_value, values_bdim);
5632   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
5633 }
5634 template <typename batch_rule_t, batch_rule_t batch_rule>
5635 at::Tensor & index_copy__generated_plumbing(at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & source) {
5636   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
5637   auto maybe_layer = maybeCurrentDynamicLayer();
5638   vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
5639   int64_t cur_level = maybe_layer->layerId();
5640   if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(index, cur_level) && !isBatchedAtLevel(source, cur_level)) {
5641     return at::_ops::index_copy_::call(self, dim, index, source);
5642   }
5643   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
5644   auto [index_value, index_bdim] = unwrapTensorAtLevel(index, cur_level);
5645   auto [source_value, source_bdim] = unwrapTensorAtLevel(source, cur_level);
5646   batch_rule(self_value, self_bdim, dim, index_value, index_bdim, source_value, source_bdim);
5647   return self;
5648 }
5649 template <typename batch_rule_t, batch_rule_t batch_rule>
5650 at::Tensor index_copy_generated_plumbing(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & source) {
5651   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
5652   auto maybe_layer = maybeCurrentDynamicLayer();
5653   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
5654   int64_t cur_level = maybe_layer->layerId();
5655   if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(index, cur_level) && !isBatchedAtLevel(source, cur_level)) {
5656     return at::_ops::index_copy::call(self, dim, index, source);
5657   }
5658   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
5659   auto [index_value, index_bdim] = unwrapTensorAtLevel(index, cur_level);
5660   auto [source_value, source_bdim] = unwrapTensorAtLevel(source, cur_level);
5661   auto results = batch_rule(self_value, self_bdim, dim, index_value, index_bdim, source_value, source_bdim);
5662   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
5663 }
5664 template <typename batch_rule_t, batch_rule_t batch_rule>
5665 at::Tensor & index_copy__dimname_generated_plumbing(at::Tensor & self, at::Dimname dim, const at::Tensor & index, const at::Tensor & source) {
5666   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
5667   auto maybe_layer = maybeCurrentDynamicLayer();
5668   vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
5669   int64_t cur_level = maybe_layer->layerId();
5670   if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(index, cur_level) && !isBatchedAtLevel(source, cur_level)) {
5671     return at::_ops::index_copy__dimname::call(self, dim, index, source);
5672   }
5673   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
5674   auto [index_value, index_bdim] = unwrapTensorAtLevel(index, cur_level);
5675   auto [source_value, source_bdim] = unwrapTensorAtLevel(source, cur_level);
5676   batch_rule(self_value, self_bdim, dim, index_value, index_bdim, source_value, source_bdim);
5677   return self;
5678 }
5679 template <typename batch_rule_t, batch_rule_t batch_rule>
5680 at::Tensor index_copy_dimname_generated_plumbing(const at::Tensor & self, at::Dimname dim, const at::Tensor & index, const at::Tensor & source) {
5681   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
5682   auto maybe_layer = maybeCurrentDynamicLayer();
5683   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
5684   int64_t cur_level = maybe_layer->layerId();
5685   if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(index, cur_level) && !isBatchedAtLevel(source, cur_level)) {
5686     return at::_ops::index_copy_dimname::call(self, dim, index, source);
5687   }
5688   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
5689   auto [index_value, index_bdim] = unwrapTensorAtLevel(index, cur_level);
5690   auto [source_value, source_bdim] = unwrapTensorAtLevel(source, cur_level);
5691   auto results = batch_rule(self_value, self_bdim, dim, index_value, index_bdim, source_value, source_bdim);
5692   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
5693 }
5694 template <typename batch_rule_t, batch_rule_t batch_rule>
5695 at::Tensor & index_put__generated_plumbing(at::Tensor & self, const c10::List<::std::optional<at::Tensor>> & indices, const at::Tensor & values, bool accumulate) {
5696   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
5697   auto maybe_layer = maybeCurrentDynamicLayer();
5698   vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
5699   int64_t cur_level = maybe_layer->layerId();
5700   if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(indices, cur_level) && !isBatchedAtLevel(values, cur_level)) {
5701     return at::_ops::index_put_::call(self, indices, values, accumulate);
5702   }
5703   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
5704   auto [values_value, values_bdim] = unwrapTensorAtLevel(values, cur_level);
5705   batch_rule(self_value, self_bdim, indices, values_value, values_bdim, accumulate);
5706   return self;
5707 }
5708 template <typename batch_rule_t, batch_rule_t batch_rule>
5709 at::Tensor index_put_generated_plumbing(const at::Tensor & self, const c10::List<::std::optional<at::Tensor>> & indices, const at::Tensor & values, bool accumulate) {
5710   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
5711   auto maybe_layer = maybeCurrentDynamicLayer();
5712   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
5713   int64_t cur_level = maybe_layer->layerId();
5714   if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(indices, cur_level) && !isBatchedAtLevel(values, cur_level)) {
5715     return at::_ops::index_put::call(self, indices, values, accumulate);
5716   }
5717   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
5718   auto [values_value, values_bdim] = unwrapTensorAtLevel(values, cur_level);
5719   auto results = batch_rule(self_value, self_bdim, indices, values_value, values_bdim, accumulate);
5720   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
5721 }
5722 template <typename batch_rule_t, batch_rule_t batch_rule>
5723 at::Tensor _unsafe_index_put_generated_plumbing(const at::Tensor & self, const c10::List<::std::optional<at::Tensor>> & indices, const at::Tensor & values, bool accumulate) {
5724   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
5725   auto maybe_layer = maybeCurrentDynamicLayer();
5726   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
5727   int64_t cur_level = maybe_layer->layerId();
5728   if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(indices, cur_level) && !isBatchedAtLevel(values, cur_level)) {
5729     return at::_ops::_unsafe_index_put::call(self, indices, values, accumulate);
5730   }
5731   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
5732   auto [values_value, values_bdim] = unwrapTensorAtLevel(values, cur_level);
5733   auto results = batch_rule(self_value, self_bdim, indices, values_value, values_bdim, accumulate);
5734   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
5735 }
5736 template <typename batch_rule_t, batch_rule_t batch_rule>
5737 at::Tensor & _index_put_impl__generated_plumbing(at::Tensor & self, const c10::List<::std::optional<at::Tensor>> & indices, const at::Tensor & values, bool accumulate, bool unsafe) {
5738   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
5739   auto maybe_layer = maybeCurrentDynamicLayer();
5740   vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
5741   int64_t cur_level = maybe_layer->layerId();
5742   if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(indices, cur_level) && !isBatchedAtLevel(values, cur_level)) {
5743     return at::_ops::_index_put_impl_::call(self, indices, values, accumulate, unsafe);
5744   }
5745   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
5746   auto [values_value, values_bdim] = unwrapTensorAtLevel(values, cur_level);
5747   batch_rule(self_value, self_bdim, indices, values_value, values_bdim, accumulate, unsafe);
5748   return self;
5749 }
5750 template <typename batch_rule_t, batch_rule_t batch_rule>
5751 at::Tensor instance_norm_generated_plumbing(const at::Tensor & input, const ::std::optional<at::Tensor> & weight, const ::std::optional<at::Tensor> & bias, const ::std::optional<at::Tensor> & running_mean, const ::std::optional<at::Tensor> & running_var, bool use_input_stats, double momentum, double eps, bool cudnn_enabled) {
5752   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
5753   auto maybe_layer = maybeCurrentDynamicLayer();
5754   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
5755   int64_t cur_level = maybe_layer->layerId();
5756   if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(bias, cur_level) && !isBatchedAtLevel(running_mean, cur_level) && !isBatchedAtLevel(running_var, cur_level)) {
5757     return at::_ops::instance_norm::call(input, weight, bias, running_mean, running_var, use_input_stats, momentum, eps, cudnn_enabled);
5758   }
5759   auto [input_value, input_bdim] = unwrapTensorAtLevel(input, cur_level);
5760   std::optional<Tensor> weight_value;
5761   std::optional<int64_t> weight_bdim;
5762   if (weight) {
5763       std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight.value(), cur_level);
5764   }
5765   std::optional<Tensor> bias_value;
5766   std::optional<int64_t> bias_bdim;
5767   if (bias) {
5768       std::tie(bias_value, bias_bdim) = unwrapTensorAtLevel(bias.value(), cur_level);
5769   }
5770   std::optional<Tensor> running_mean_value;
5771   std::optional<int64_t> running_mean_bdim;
5772   if (running_mean) {
5773       std::tie(running_mean_value, running_mean_bdim) = unwrapTensorAtLevel(running_mean.value(), cur_level);
5774   }
5775   std::optional<Tensor> running_var_value;
5776   std::optional<int64_t> running_var_bdim;
5777   if (running_var) {
5778       std::tie(running_var_value, running_var_bdim) = unwrapTensorAtLevel(running_var.value(), cur_level);
5779   }
5780   auto results = batch_rule(input_value, input_bdim, weight_value, weight_bdim, bias_value, bias_bdim, running_mean_value, running_mean_bdim, running_var_value, running_var_bdim, use_input_stats, momentum, eps, cudnn_enabled);
5781   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
5782 }
5783 template <typename batch_rule_t, batch_rule_t batch_rule>
5784 at::Tensor isclose_generated_plumbing(const at::Tensor & self, const at::Tensor & other, double rtol, double atol, bool equal_nan) {
5785   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
5786   auto maybe_layer = maybeCurrentDynamicLayer();
5787   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
5788   int64_t cur_level = maybe_layer->layerId();
5789   if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
5790     return at::_ops::isclose::call(self, other, rtol, atol, equal_nan);
5791   }
5792   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
5793   auto [other_value, other_bdim] = unwrapTensorAtLevel(other, cur_level);
5794   auto results = batch_rule(self_value, self_bdim, other_value, other_bdim, rtol, atol, equal_nan);
5795   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
5796 }
5797 template <typename batch_rule_t, batch_rule_t batch_rule>
5798 at::Tensor isin_Tensor_Tensor_generated_plumbing(const at::Tensor & elements, const at::Tensor & test_elements, bool assume_unique, bool invert) {
5799   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
5800   auto maybe_layer = maybeCurrentDynamicLayer();
5801   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
5802   int64_t cur_level = maybe_layer->layerId();
5803   if (!isBatchedAtLevel(elements, cur_level) && !isBatchedAtLevel(test_elements, cur_level)) {
5804     return at::_ops::isin_Tensor_Tensor::call(elements, test_elements, assume_unique, invert);
5805   }
5806   auto [elements_value, elements_bdim] = unwrapTensorAtLevel(elements, cur_level);
5807   auto [test_elements_value, test_elements_bdim] = unwrapTensorAtLevel(test_elements, cur_level);
5808   auto results = batch_rule(elements_value, elements_bdim, test_elements_value, test_elements_bdim, assume_unique, invert);
5809   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
5810 }
5811 template <typename batch_rule_t, batch_rule_t batch_rule>
5812 at::Tensor isin_Tensor_Scalar_generated_plumbing(const at::Tensor & elements, const at::Scalar & test_element, bool assume_unique, bool invert) {
5813   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
5814   auto maybe_layer = maybeCurrentDynamicLayer();
5815   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
5816   int64_t cur_level = maybe_layer->layerId();
5817   if (!isBatchedAtLevel(elements, cur_level)) {
5818     return at::_ops::isin_Tensor_Scalar::call(elements, test_element, assume_unique, invert);
5819   }
5820   auto [elements_value, elements_bdim] = unwrapTensorAtLevel(elements, cur_level);
5821   auto results = batch_rule(elements_value, elements_bdim, test_element, assume_unique, invert);
5822   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
5823 }
5824 template <typename batch_rule_t, batch_rule_t batch_rule>
5825 at::Tensor isin_Scalar_Tensor_generated_plumbing(const at::Scalar & element, const at::Tensor & test_elements, bool assume_unique, bool invert) {
5826   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
5827   auto maybe_layer = maybeCurrentDynamicLayer();
5828   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
5829   int64_t cur_level = maybe_layer->layerId();
5830   if (!isBatchedAtLevel(test_elements, cur_level)) {
5831     return at::_ops::isin_Scalar_Tensor::call(element, test_elements, assume_unique, invert);
5832   }
5833   auto [test_elements_value, test_elements_bdim] = unwrapTensorAtLevel(test_elements, cur_level);
5834   auto results = batch_rule(element, test_elements_value, test_elements_bdim, assume_unique, invert);
5835   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
5836 }
5837 template <typename batch_rule_t, batch_rule_t batch_rule>
5838 at::Tensor isnan_generated_plumbing(const at::Tensor & self) {
5839   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
5840   auto maybe_layer = maybeCurrentDynamicLayer();
5841   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
5842   int64_t cur_level = maybe_layer->layerId();
5843   if (!isBatchedAtLevel(self, cur_level)) {
5844     return at::_ops::isnan::call(self);
5845   }
5846   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
5847   auto results = batch_rule(self_value, self_bdim);
5848   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
5849 }
5850 template <typename batch_rule_t, batch_rule_t batch_rule>
5851 at::Tensor isreal_generated_plumbing(const at::Tensor & self) {
5852   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
5853   auto maybe_layer = maybeCurrentDynamicLayer();
5854   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
5855   int64_t cur_level = maybe_layer->layerId();
5856   if (!isBatchedAtLevel(self, cur_level)) {
5857     return at::_ops::isreal::call(self);
5858   }
5859   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
5860   auto results = batch_rule(self_value, self_bdim);
5861   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
5862 }
5863 template <typename batch_rule_t, batch_rule_t batch_rule>
5864 at::Tensor kl_div_generated_plumbing(const at::Tensor & self, const at::Tensor & target, int64_t reduction, bool log_target) {
5865   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
5866   auto maybe_layer = maybeCurrentDynamicLayer();
5867   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
5868   int64_t cur_level = maybe_layer->layerId();
5869   if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(target, cur_level)) {
5870     return at::_ops::kl_div::call(self, target, reduction, log_target);
5871   }
5872   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
5873   auto [target_value, target_bdim] = unwrapTensorAtLevel(target, cur_level);
5874   auto results = batch_rule(self_value, self_bdim, target_value, target_bdim, reduction, log_target);
5875   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
5876 }
5877 template <typename batch_rule_t, batch_rule_t batch_rule>
5878 at::Tensor kron_generated_plumbing(const at::Tensor & self, const at::Tensor & other) {
5879   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
5880   auto maybe_layer = maybeCurrentDynamicLayer();
5881   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
5882   int64_t cur_level = maybe_layer->layerId();
5883   if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
5884     return at::_ops::kron::call(self, other);
5885   }
5886   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
5887   auto [other_value, other_bdim] = unwrapTensorAtLevel(other, cur_level);
5888   auto results = batch_rule(self_value, self_bdim, other_value, other_bdim);
5889   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
5890 }
5891 template <typename batch_rule_t, batch_rule_t batch_rule>
5892 ::std::tuple<at::Tensor,at::Tensor> kthvalue_generated_plumbing(const at::Tensor & self, c10::SymInt k, int64_t dim, bool keepdim) {
5893   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
5894   auto maybe_layer = maybeCurrentDynamicLayer();
5895   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
5896   int64_t cur_level = maybe_layer->layerId();
5897   if (!isBatchedAtLevel(self, cur_level)) {
5898     return at::_ops::kthvalue::call(self, k, dim, keepdim);
5899   }
5900   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
5901   auto results = batch_rule(self_value, self_bdim, k, dim, keepdim);
5902   return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
5903 }
5904 template <typename batch_rule_t, batch_rule_t batch_rule>
5905 ::std::tuple<at::Tensor,at::Tensor> kthvalue_dimname_generated_plumbing(const at::Tensor & self, c10::SymInt k, at::Dimname dim, bool keepdim) {
5906   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
5907   auto maybe_layer = maybeCurrentDynamicLayer();
5908   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
5909   int64_t cur_level = maybe_layer->layerId();
5910   if (!isBatchedAtLevel(self, cur_level)) {
5911     return at::_ops::kthvalue_dimname::call(self, k, dim, keepdim);
5912   }
5913   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
5914   auto results = batch_rule(self_value, self_bdim, k, dim, keepdim);
5915   return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
5916 }
5917 template <typename batch_rule_t, batch_rule_t batch_rule>
5918 at::Tensor layer_norm_generated_plumbing(const at::Tensor & input, c10::SymIntArrayRef normalized_shape, const ::std::optional<at::Tensor> & weight, const ::std::optional<at::Tensor> & bias, double eps, bool cudnn_enable) {
5919   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
5920   auto maybe_layer = maybeCurrentDynamicLayer();
5921   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
5922   int64_t cur_level = maybe_layer->layerId();
5923   if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(bias, cur_level)) {
5924     return at::_ops::layer_norm::call(input, normalized_shape, weight, bias, eps, cudnn_enable);
5925   }
5926   auto [input_value, input_bdim] = unwrapTensorAtLevel(input, cur_level);
5927   std::optional<Tensor> weight_value;
5928   std::optional<int64_t> weight_bdim;
5929   if (weight) {
5930       std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight.value(), cur_level);
5931   }
5932   std::optional<Tensor> bias_value;
5933   std::optional<int64_t> bias_bdim;
5934   if (bias) {
5935       std::tie(bias_value, bias_bdim) = unwrapTensorAtLevel(bias.value(), cur_level);
5936   }
5937   auto results = batch_rule(input_value, input_bdim, normalized_shape, weight_value, weight_bdim, bias_value, bias_bdim, eps, cudnn_enable);
5938   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
5939 }
5940 template <typename batch_rule_t, batch_rule_t batch_rule>
5941 ::std::tuple<at::Tensor,at::Tensor,at::Tensor> native_layer_norm_generated_plumbing(const at::Tensor & input, c10::SymIntArrayRef normalized_shape, const ::std::optional<at::Tensor> & weight, const ::std::optional<at::Tensor> & bias, double eps) {
5942   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
5943   auto maybe_layer = maybeCurrentDynamicLayer();
5944   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
5945   int64_t cur_level = maybe_layer->layerId();
5946   if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(bias, cur_level)) {
5947     return at::_ops::native_layer_norm::call(input, normalized_shape, weight, bias, eps);
5948   }
5949   auto [input_value, input_bdim] = unwrapTensorAtLevel(input, cur_level);
5950   std::optional<Tensor> weight_value;
5951   std::optional<int64_t> weight_bdim;
5952   if (weight) {
5953       std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight.value(), cur_level);
5954   }
5955   std::optional<Tensor> bias_value;
5956   std::optional<int64_t> bias_bdim;
5957   if (bias) {
5958       std::tie(bias_value, bias_bdim) = unwrapTensorAtLevel(bias.value(), cur_level);
5959   }
5960   auto results = batch_rule(input_value, input_bdim, normalized_shape, weight_value, weight_bdim, bias_value, bias_bdim, eps);
5961   return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level));
5962 }
5963 template <typename batch_rule_t, batch_rule_t batch_rule>
5964 ::std::tuple<at::Tensor,at::Tensor,at::Tensor> native_layer_norm_backward_generated_plumbing(const at::Tensor & grad_out, const at::Tensor & input, c10::SymIntArrayRef normalized_shape, const at::Tensor & mean, const at::Tensor & rstd, const ::std::optional<at::Tensor> & weight, const ::std::optional<at::Tensor> & bias, ::std::array<bool,3> output_mask) {
5965   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
5966   auto maybe_layer = maybeCurrentDynamicLayer();
5967   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
5968   int64_t cur_level = maybe_layer->layerId();
5969   if (!isBatchedAtLevel(grad_out, cur_level) && !isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(mean, cur_level) && !isBatchedAtLevel(rstd, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(bias, cur_level)) {
5970     return at::_ops::native_layer_norm_backward::call(grad_out, input, normalized_shape, mean, rstd, weight, bias, output_mask);
5971   }
5972   auto [grad_out_value, grad_out_bdim] = unwrapTensorAtLevel(grad_out, cur_level);
5973   auto [input_value, input_bdim] = unwrapTensorAtLevel(input, cur_level);
5974   auto [mean_value, mean_bdim] = unwrapTensorAtLevel(mean, cur_level);
5975   auto [rstd_value, rstd_bdim] = unwrapTensorAtLevel(rstd, cur_level);
5976   std::optional<Tensor> weight_value;
5977   std::optional<int64_t> weight_bdim;
5978   if (weight) {
5979       std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight.value(), cur_level);
5980   }
5981   std::optional<Tensor> bias_value;
5982   std::optional<int64_t> bias_bdim;
5983   if (bias) {
5984       std::tie(bias_value, bias_bdim) = unwrapTensorAtLevel(bias.value(), cur_level);
5985   }
5986   auto results = batch_rule(grad_out_value, grad_out_bdim, input_value, input_bdim, normalized_shape, mean_value, mean_bdim, rstd_value, rstd_bdim, weight_value, weight_bdim, bias_value, bias_bdim, output_mask);
5987   return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level));
5988 }
5989 template <typename batch_rule_t, batch_rule_t batch_rule>
5990 at::Tensor rms_norm_generated_plumbing(const at::Tensor & input, c10::SymIntArrayRef normalized_shape, const ::std::optional<at::Tensor> & weight, ::std::optional<double> eps) {
5991   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
5992   auto maybe_layer = maybeCurrentDynamicLayer();
5993   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
5994   int64_t cur_level = maybe_layer->layerId();
5995   if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(weight, cur_level)) {
5996     return at::_ops::rms_norm::call(input, normalized_shape, weight, eps);
5997   }
5998   auto [input_value, input_bdim] = unwrapTensorAtLevel(input, cur_level);
5999   std::optional<Tensor> weight_value;
6000   std::optional<int64_t> weight_bdim;
6001   if (weight) {
6002       std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight.value(), cur_level);
6003   }
6004   auto results = batch_rule(input_value, input_bdim, normalized_shape, weight_value, weight_bdim, eps);
6005   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
6006 }
6007 template <typename batch_rule_t, batch_rule_t batch_rule>
6008 at::Tensor _fused_rms_norm_generated_plumbing(const at::Tensor & input, int64_t normalized_shape_ndim, const at::Tensor & weight, double eps) {
6009   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
6010   auto maybe_layer = maybeCurrentDynamicLayer();
6011   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
6012   int64_t cur_level = maybe_layer->layerId();
6013   if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(weight, cur_level)) {
6014     return at::_ops::_fused_rms_norm::call(input, normalized_shape_ndim, weight, eps);
6015   }
6016   auto [input_value, input_bdim] = unwrapTensorAtLevel(input, cur_level);
6017   auto [weight_value, weight_bdim] = unwrapTensorAtLevel(weight, cur_level);
6018   auto results = batch_rule(input_value, input_bdim, normalized_shape_ndim, weight_value, weight_bdim, eps);
6019   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
6020 }
6021 template <typename batch_rule_t, batch_rule_t batch_rule>
6022 at::Tensor nan_to_num_generated_plumbing(const at::Tensor & self, ::std::optional<double> nan, ::std::optional<double> posinf, ::std::optional<double> neginf) {
6023   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
6024   auto maybe_layer = maybeCurrentDynamicLayer();
6025   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
6026   int64_t cur_level = maybe_layer->layerId();
6027   if (!isBatchedAtLevel(self, cur_level)) {
6028     return at::_ops::nan_to_num::call(self, nan, posinf, neginf);
6029   }
6030   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
6031   auto results = batch_rule(self_value, self_bdim, nan, posinf, neginf);
6032   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
6033 }
6034 template <typename batch_rule_t, batch_rule_t batch_rule>
6035 at::Tensor & nan_to_num__generated_plumbing(at::Tensor & self, ::std::optional<double> nan, ::std::optional<double> posinf, ::std::optional<double> neginf) {
6036   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
6037   auto maybe_layer = maybeCurrentDynamicLayer();
6038   vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
6039   int64_t cur_level = maybe_layer->layerId();
6040   if (!isBatchedAtLevel(self, cur_level)) {
6041     return at::_ops::nan_to_num_::call(self, nan, posinf, neginf);
6042   }
6043   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
6044   batch_rule(self_value, self_bdim, nan, posinf, neginf);
6045   return self;
6046 }
6047 template <typename batch_rule_t, batch_rule_t batch_rule>
6048 at::Tensor linear_generated_plumbing(const at::Tensor & input, const at::Tensor & weight, const ::std::optional<at::Tensor> & bias) {
6049   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
6050   auto maybe_layer = maybeCurrentDynamicLayer();
6051   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
6052   int64_t cur_level = maybe_layer->layerId();
6053   if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(bias, cur_level)) {
6054     return at::_ops::linear::call(input, weight, bias);
6055   }
6056   auto [input_value, input_bdim] = unwrapTensorAtLevel(input, cur_level);
6057   auto [weight_value, weight_bdim] = unwrapTensorAtLevel(weight, cur_level);
6058   std::optional<Tensor> bias_value;
6059   std::optional<int64_t> bias_bdim;
6060   if (bias) {
6061       std::tie(bias_value, bias_bdim) = unwrapTensorAtLevel(bias.value(), cur_level);
6062   }
6063   auto results = batch_rule(input_value, input_bdim, weight_value, weight_bdim, bias_value, bias_bdim);
6064   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
6065 }
6066 template <typename batch_rule_t, batch_rule_t batch_rule>
6067 ::std::tuple<at::Tensor,at::Tensor,at::Tensor> linear_backward_generated_plumbing(const at::Tensor & self, const at::Tensor & grad_output, const at::Tensor & weight, ::std::array<bool,3> output_mask) {
6068   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
6069   auto maybe_layer = maybeCurrentDynamicLayer();
6070   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
6071   int64_t cur_level = maybe_layer->layerId();
6072   if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(weight, cur_level)) {
6073     return at::_ops::linear_backward::call(self, grad_output, weight, output_mask);
6074   }
6075   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
6076   auto [grad_output_value, grad_output_bdim] = unwrapTensorAtLevel(grad_output, cur_level);
6077   auto [weight_value, weight_bdim] = unwrapTensorAtLevel(weight, cur_level);
6078   auto results = batch_rule(self_value, self_bdim, grad_output_value, grad_output_bdim, weight_value, weight_bdim, output_mask);
6079   return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level));
6080 }
6081 template <typename batch_rule_t, batch_rule_t batch_rule>
6082 at::Tensor mkldnn_linear_generated_plumbing(const at::Tensor & self, const at::Tensor & weight, const ::std::optional<at::Tensor> & bias) {
6083   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
6084   auto maybe_layer = maybeCurrentDynamicLayer();
6085   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
6086   int64_t cur_level = maybe_layer->layerId();
6087   if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(bias, cur_level)) {
6088     return at::_ops::mkldnn_linear::call(self, weight, bias);
6089   }
6090   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
6091   auto [weight_value, weight_bdim] = unwrapTensorAtLevel(weight, cur_level);
6092   std::optional<Tensor> bias_value;
6093   std::optional<int64_t> bias_bdim;
6094   if (bias) {
6095       std::tie(bias_value, bias_bdim) = unwrapTensorAtLevel(bias.value(), cur_level);
6096   }
6097   auto results = batch_rule(self_value, self_bdim, weight_value, weight_bdim, bias_value, bias_bdim);
6098   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
6099 }
6100 template <typename batch_rule_t, batch_rule_t batch_rule>
6101 at::Tensor mkldnn_linear_backward_input_generated_plumbing(at::IntArrayRef input_size, const at::Tensor & grad_output, const at::Tensor & weight) {
6102   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
6103   auto maybe_layer = maybeCurrentDynamicLayer();
6104   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
6105   int64_t cur_level = maybe_layer->layerId();
6106   if (!isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(weight, cur_level)) {
6107     return at::_ops::mkldnn_linear_backward_input::call(input_size, grad_output, weight);
6108   }
6109   auto [grad_output_value, grad_output_bdim] = unwrapTensorAtLevel(grad_output, cur_level);
6110   auto [weight_value, weight_bdim] = unwrapTensorAtLevel(weight, cur_level);
6111   auto results = batch_rule(input_size, grad_output_value, grad_output_bdim, weight_value, weight_bdim);
6112   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
6113 }
6114 template <typename batch_rule_t, batch_rule_t batch_rule>
6115 ::std::tuple<at::Tensor,at::Tensor> mkldnn_linear_backward_weights_generated_plumbing(const at::Tensor & grad_output, const at::Tensor & input, const at::Tensor & weight, bool bias_defined) {
6116   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
6117   auto maybe_layer = maybeCurrentDynamicLayer();
6118   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
6119   int64_t cur_level = maybe_layer->layerId();
6120   if (!isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(weight, cur_level)) {
6121     return at::_ops::mkldnn_linear_backward_weights::call(grad_output, input, weight, bias_defined);
6122   }
6123   auto [grad_output_value, grad_output_bdim] = unwrapTensorAtLevel(grad_output, cur_level);
6124   auto [input_value, input_bdim] = unwrapTensorAtLevel(input, cur_level);
6125   auto [weight_value, weight_bdim] = unwrapTensorAtLevel(weight, cur_level);
6126   auto results = batch_rule(grad_output_value, grad_output_bdim, input_value, input_bdim, weight_value, weight_bdim, bias_defined);
6127   return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
6128 }
6129 template <typename batch_rule_t, batch_rule_t batch_rule>
6130 ::std::tuple<at::Tensor,at::Tensor,at::Tensor> mkldnn_linear_backward_generated_plumbing(const at::Tensor & self, const at::Tensor & grad_output, const at::Tensor & weight, ::std::array<bool,3> output_mask) {
6131   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
6132   auto maybe_layer = maybeCurrentDynamicLayer();
6133   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
6134   int64_t cur_level = maybe_layer->layerId();
6135   if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(weight, cur_level)) {
6136     return at::_ops::mkldnn_linear_backward::call(self, grad_output, weight, output_mask);
6137   }
6138   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
6139   auto [grad_output_value, grad_output_bdim] = unwrapTensorAtLevel(grad_output, cur_level);
6140   auto [weight_value, weight_bdim] = unwrapTensorAtLevel(weight, cur_level);
6141   auto results = batch_rule(self_value, self_bdim, grad_output_value, grad_output_bdim, weight_value, weight_bdim, output_mask);
6142   return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level));
6143 }
6144 template <typename batch_rule_t, batch_rule_t batch_rule>
6145 at::Tensor _cslt_compress_generated_plumbing(const at::Tensor & input) {
6146   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
6147   auto maybe_layer = maybeCurrentDynamicLayer();
6148   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
6149   int64_t cur_level = maybe_layer->layerId();
6150   if (!isBatchedAtLevel(input, cur_level)) {
6151     return at::_ops::_cslt_compress::call(input);
6152   }
6153   auto [input_value, input_bdim] = unwrapTensorAtLevel(input, cur_level);
6154   auto results = batch_rule(input_value, input_bdim);
6155   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
6156 }
6157 template <typename batch_rule_t, batch_rule_t batch_rule>
6158 at::Tensor _cslt_sparse_mm_generated_plumbing(const at::Tensor & compressed_A, const at::Tensor & dense_B, const ::std::optional<at::Tensor> & bias, const ::std::optional<at::Tensor> & alpha, ::std::optional<at::ScalarType> out_dtype, bool transpose_result, int64_t alg_id, int64_t split_k, int64_t split_k_mode) {
6159   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
6160   auto maybe_layer = maybeCurrentDynamicLayer();
6161   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
6162   int64_t cur_level = maybe_layer->layerId();
6163   if (!isBatchedAtLevel(compressed_A, cur_level) && !isBatchedAtLevel(dense_B, cur_level) && !isBatchedAtLevel(bias, cur_level) && !isBatchedAtLevel(alpha, cur_level)) {
6164     return at::_ops::_cslt_sparse_mm::call(compressed_A, dense_B, bias, alpha, out_dtype, transpose_result, alg_id, split_k, split_k_mode);
6165   }
6166   auto [compressed_A_value, compressed_A_bdim] = unwrapTensorAtLevel(compressed_A, cur_level);
6167   auto [dense_B_value, dense_B_bdim] = unwrapTensorAtLevel(dense_B, cur_level);
6168   std::optional<Tensor> bias_value;
6169   std::optional<int64_t> bias_bdim;
6170   if (bias) {
6171       std::tie(bias_value, bias_bdim) = unwrapTensorAtLevel(bias.value(), cur_level);
6172   }
6173   std::optional<Tensor> alpha_value;
6174   std::optional<int64_t> alpha_bdim;
6175   if (alpha) {
6176       std::tie(alpha_value, alpha_bdim) = unwrapTensorAtLevel(alpha.value(), cur_level);
6177   }
6178   auto results = batch_rule(compressed_A_value, compressed_A_bdim, dense_B_value, dense_B_bdim, bias_value, bias_bdim, alpha_value, alpha_bdim, out_dtype, transpose_result, alg_id, split_k, split_k_mode);
6179   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
6180 }
6181 template <typename batch_rule_t, batch_rule_t batch_rule>
6182 ::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor,at::Tensor> _sparse_semi_structured_tile_generated_plumbing(const at::Tensor & input, c10::string_view algorithm, bool use_cutlass) {
6183   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
6184   auto maybe_layer = maybeCurrentDynamicLayer();
6185   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
6186   int64_t cur_level = maybe_layer->layerId();
6187   if (!isBatchedAtLevel(input, cur_level)) {
6188     return at::_ops::_sparse_semi_structured_tile::call(input, algorithm, use_cutlass);
6189   }
6190   auto [input_value, input_bdim] = unwrapTensorAtLevel(input, cur_level);
6191   auto results = batch_rule(input_value, input_bdim, algorithm, use_cutlass);
6192   return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level), makeBatched(std::get<6>(results), std::get<7>(results), cur_level), makeBatched(std::get<8>(results), std::get<9>(results), cur_level));
6193 }
6194 template <typename batch_rule_t, batch_rule_t batch_rule>
6195 ::std::tuple<at::Tensor,at::Tensor> _sparse_semi_structured_apply_generated_plumbing(const at::Tensor & input, const at::Tensor & thread_masks) {
6196   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
6197   auto maybe_layer = maybeCurrentDynamicLayer();
6198   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
6199   int64_t cur_level = maybe_layer->layerId();
6200   if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(thread_masks, cur_level)) {
6201     return at::_ops::_sparse_semi_structured_apply::call(input, thread_masks);
6202   }
6203   auto [input_value, input_bdim] = unwrapTensorAtLevel(input, cur_level);
6204   auto [thread_masks_value, thread_masks_bdim] = unwrapTensorAtLevel(thread_masks, cur_level);
6205   auto results = batch_rule(input_value, input_bdim, thread_masks_value, thread_masks_bdim);
6206   return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
6207 }
6208 template <typename batch_rule_t, batch_rule_t batch_rule>
6209 at::Tensor _sparse_semi_structured_apply_dense_generated_plumbing(const at::Tensor & input, const at::Tensor & thread_masks) {
6210   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
6211   auto maybe_layer = maybeCurrentDynamicLayer();
6212   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
6213   int64_t cur_level = maybe_layer->layerId();
6214   if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(thread_masks, cur_level)) {
6215     return at::_ops::_sparse_semi_structured_apply_dense::call(input, thread_masks);
6216   }
6217   auto [input_value, input_bdim] = unwrapTensorAtLevel(input, cur_level);
6218   auto [thread_masks_value, thread_masks_bdim] = unwrapTensorAtLevel(thread_masks, cur_level);
6219   auto results = batch_rule(input_value, input_bdim, thread_masks_value, thread_masks_bdim);
6220   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
6221 }
6222 template <typename batch_rule_t, batch_rule_t batch_rule>
6223 at::Tensor _sparse_semi_structured_linear_generated_plumbing(const at::Tensor & input, const at::Tensor & weight, const at::Tensor & meta, const ::std::optional<at::Tensor> & bias, ::std::optional<c10::string_view> activation, ::std::optional<at::ScalarType> out_dtype) {
6224   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
6225   auto maybe_layer = maybeCurrentDynamicLayer();
6226   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
6227   int64_t cur_level = maybe_layer->layerId();
6228   if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(meta, cur_level) && !isBatchedAtLevel(bias, cur_level)) {
6229     return at::_ops::_sparse_semi_structured_linear::call(input, weight, meta, bias, activation, out_dtype);
6230   }
6231   auto [input_value, input_bdim] = unwrapTensorAtLevel(input, cur_level);
6232   auto [weight_value, weight_bdim] = unwrapTensorAtLevel(weight, cur_level);
6233   auto [meta_value, meta_bdim] = unwrapTensorAtLevel(meta, cur_level);
6234   std::optional<Tensor> bias_value;
6235   std::optional<int64_t> bias_bdim;
6236   if (bias) {
6237       std::tie(bias_value, bias_bdim) = unwrapTensorAtLevel(bias.value(), cur_level);
6238   }
6239   auto results = batch_rule(input_value, input_bdim, weight_value, weight_bdim, meta_value, meta_bdim, bias_value, bias_bdim, activation, out_dtype);
6240   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
6241 }
6242 template <typename batch_rule_t, batch_rule_t batch_rule>
6243 at::Tensor _sparse_semi_structured_mm_generated_plumbing(const at::Tensor & mat1, const at::Tensor & mat1_meta, const at::Tensor & mat2, ::std::optional<at::ScalarType> out_dtype) {
6244   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
6245   auto maybe_layer = maybeCurrentDynamicLayer();
6246   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
6247   int64_t cur_level = maybe_layer->layerId();
6248   if (!isBatchedAtLevel(mat1, cur_level) && !isBatchedAtLevel(mat1_meta, cur_level) && !isBatchedAtLevel(mat2, cur_level)) {
6249     return at::_ops::_sparse_semi_structured_mm::call(mat1, mat1_meta, mat2, out_dtype);
6250   }
6251   auto [mat1_value, mat1_bdim] = unwrapTensorAtLevel(mat1, cur_level);
6252   auto [mat1_meta_value, mat1_meta_bdim] = unwrapTensorAtLevel(mat1_meta, cur_level);
6253   auto [mat2_value, mat2_bdim] = unwrapTensorAtLevel(mat2, cur_level);
6254   auto results = batch_rule(mat1_value, mat1_bdim, mat1_meta_value, mat1_meta_bdim, mat2_value, mat2_bdim, out_dtype);
6255   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
6256 }
6257 template <typename batch_rule_t, batch_rule_t batch_rule>
6258 at::Tensor _sparse_semi_structured_addmm_generated_plumbing(const at::Tensor & input, const at::Tensor & mat1, const at::Tensor & mat1_meta, const at::Tensor & mat2, const at::Scalar & alpha, const at::Scalar & beta, ::std::optional<at::ScalarType> out_dtype) {
6259   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
6260   auto maybe_layer = maybeCurrentDynamicLayer();
6261   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
6262   int64_t cur_level = maybe_layer->layerId();
6263   if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(mat1, cur_level) && !isBatchedAtLevel(mat1_meta, cur_level) && !isBatchedAtLevel(mat2, cur_level)) {
6264     return at::_ops::_sparse_semi_structured_addmm::call(input, mat1, mat1_meta, mat2, alpha, beta, out_dtype);
6265   }
6266   auto [input_value, input_bdim] = unwrapTensorAtLevel(input, cur_level);
6267   auto [mat1_value, mat1_bdim] = unwrapTensorAtLevel(mat1, cur_level);
6268   auto [mat1_meta_value, mat1_meta_bdim] = unwrapTensorAtLevel(mat1_meta, cur_level);
6269   auto [mat2_value, mat2_bdim] = unwrapTensorAtLevel(mat2, cur_level);
6270   auto results = batch_rule(input_value, input_bdim, mat1_value, mat1_bdim, mat1_meta_value, mat1_meta_bdim, mat2_value, mat2_bdim, alpha, beta, out_dtype);
6271   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
6272 }
6273 template <typename batch_rule_t, batch_rule_t batch_rule>
6274 at::Tensor _mixed_dtypes_linear_generated_plumbing(const at::Tensor & input, const at::Tensor & weight, const at::Tensor & scale, const ::std::optional<at::Tensor> & bias, ::std::optional<c10::string_view> activation) {
6275   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
6276   auto maybe_layer = maybeCurrentDynamicLayer();
6277   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
6278   int64_t cur_level = maybe_layer->layerId();
6279   if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(scale, cur_level) && !isBatchedAtLevel(bias, cur_level)) {
6280     return at::_ops::_mixed_dtypes_linear::call(input, weight, scale, bias, activation);
6281   }
6282   auto [input_value, input_bdim] = unwrapTensorAtLevel(input, cur_level);
6283   auto [weight_value, weight_bdim] = unwrapTensorAtLevel(weight, cur_level);
6284   auto [scale_value, scale_bdim] = unwrapTensorAtLevel(scale, cur_level);
6285   std::optional<Tensor> bias_value;
6286   std::optional<int64_t> bias_bdim;
6287   if (bias) {
6288       std::tie(bias_value, bias_bdim) = unwrapTensorAtLevel(bias.value(), cur_level);
6289   }
6290   auto results = batch_rule(input_value, input_bdim, weight_value, weight_bdim, scale_value, scale_bdim, bias_value, bias_bdim, activation);
6291   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
6292 }
6293 template <typename batch_rule_t, batch_rule_t batch_rule>
6294 at::Tensor fbgemm_linear_int8_weight_fp32_activation_generated_plumbing(const at::Tensor & input, const at::Tensor & weight, const at::Tensor & packed, const at::Tensor & col_offsets, const at::Scalar & weight_scale, const at::Scalar & weight_zero_point, const at::Tensor & bias) {
6295   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
6296   auto maybe_layer = maybeCurrentDynamicLayer();
6297   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
6298   int64_t cur_level = maybe_layer->layerId();
6299   if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(packed, cur_level) && !isBatchedAtLevel(col_offsets, cur_level) && !isBatchedAtLevel(bias, cur_level)) {
6300     return at::_ops::fbgemm_linear_int8_weight_fp32_activation::call(input, weight, packed, col_offsets, weight_scale, weight_zero_point, bias);
6301   }
6302   auto [input_value, input_bdim] = unwrapTensorAtLevel(input, cur_level);
6303   auto [weight_value, weight_bdim] = unwrapTensorAtLevel(weight, cur_level);
6304   auto [packed_value, packed_bdim] = unwrapTensorAtLevel(packed, cur_level);
6305   auto [col_offsets_value, col_offsets_bdim] = unwrapTensorAtLevel(col_offsets, cur_level);
6306   auto [bias_value, bias_bdim] = unwrapTensorAtLevel(bias, cur_level);
6307   auto results = batch_rule(input_value, input_bdim, weight_value, weight_bdim, packed_value, packed_bdim, col_offsets_value, col_offsets_bdim, weight_scale, weight_zero_point, bias_value, bias_bdim);
6308   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
6309 }
6310 template <typename batch_rule_t, batch_rule_t batch_rule>
6311 at::Tensor fbgemm_linear_int8_weight_generated_plumbing(const at::Tensor & input, const at::Tensor & weight, const at::Tensor & packed, const at::Tensor & col_offsets, const at::Scalar & weight_scale, const at::Scalar & weight_zero_point, const at::Tensor & bias) {
6312   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
6313   auto maybe_layer = maybeCurrentDynamicLayer();
6314   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
6315   int64_t cur_level = maybe_layer->layerId();
6316   if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(packed, cur_level) && !isBatchedAtLevel(col_offsets, cur_level) && !isBatchedAtLevel(bias, cur_level)) {
6317     return at::_ops::fbgemm_linear_int8_weight::call(input, weight, packed, col_offsets, weight_scale, weight_zero_point, bias);
6318   }
6319   auto [input_value, input_bdim] = unwrapTensorAtLevel(input, cur_level);
6320   auto [weight_value, weight_bdim] = unwrapTensorAtLevel(weight, cur_level);
6321   auto [packed_value, packed_bdim] = unwrapTensorAtLevel(packed, cur_level);
6322   auto [col_offsets_value, col_offsets_bdim] = unwrapTensorAtLevel(col_offsets, cur_level);
6323   auto [bias_value, bias_bdim] = unwrapTensorAtLevel(bias, cur_level);
6324   auto results = batch_rule(input_value, input_bdim, weight_value, weight_bdim, packed_value, packed_bdim, col_offsets_value, col_offsets_bdim, weight_scale, weight_zero_point, bias_value, bias_bdim);
6325   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
6326 }
6327 template <typename batch_rule_t, batch_rule_t batch_rule>
6328 at::Tensor fbgemm_pack_gemm_matrix_fp16_generated_plumbing(const at::Tensor & input) {
6329   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
6330   auto maybe_layer = maybeCurrentDynamicLayer();
6331   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
6332   int64_t cur_level = maybe_layer->layerId();
6333   if (!isBatchedAtLevel(input, cur_level)) {
6334     return at::_ops::fbgemm_pack_gemm_matrix_fp16::call(input);
6335   }
6336   auto [input_value, input_bdim] = unwrapTensorAtLevel(input, cur_level);
6337   auto results = batch_rule(input_value, input_bdim);
6338   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
6339 }
6340 template <typename batch_rule_t, batch_rule_t batch_rule>
6341 at::Tensor _wrapped_linear_prepack_generated_plumbing(const at::Tensor & weight, const at::Tensor & weight_scale, const at::Tensor & weight_zero_point, const at::Tensor & bias) {
6342   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
6343   auto maybe_layer = maybeCurrentDynamicLayer();
6344   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
6345   int64_t cur_level = maybe_layer->layerId();
6346   if (!isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(weight_scale, cur_level) && !isBatchedAtLevel(weight_zero_point, cur_level) && !isBatchedAtLevel(bias, cur_level)) {
6347     return at::_ops::_wrapped_linear_prepack::call(weight, weight_scale, weight_zero_point, bias);
6348   }
6349   auto [weight_value, weight_bdim] = unwrapTensorAtLevel(weight, cur_level);
6350   auto [weight_scale_value, weight_scale_bdim] = unwrapTensorAtLevel(weight_scale, cur_level);
6351   auto [weight_zero_point_value, weight_zero_point_bdim] = unwrapTensorAtLevel(weight_zero_point, cur_level);
6352   auto [bias_value, bias_bdim] = unwrapTensorAtLevel(bias, cur_level);
6353   auto results = batch_rule(weight_value, weight_bdim, weight_scale_value, weight_scale_bdim, weight_zero_point_value, weight_zero_point_bdim, bias_value, bias_bdim);
6354   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
6355 }
6356 template <typename batch_rule_t, batch_rule_t batch_rule>
6357 at::Tensor _wrapped_quantized_linear_prepacked_generated_plumbing(const at::Tensor & input, const at::Tensor & input_scale, const at::Tensor & input_zero_point, const at::Tensor & packed_weight, const at::Tensor & output_scale, const at::Tensor & output_zero_point, int64_t out_channel) {
6358   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
6359   auto maybe_layer = maybeCurrentDynamicLayer();
6360   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
6361   int64_t cur_level = maybe_layer->layerId();
6362   if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(input_scale, cur_level) && !isBatchedAtLevel(input_zero_point, cur_level) && !isBatchedAtLevel(packed_weight, cur_level) && !isBatchedAtLevel(output_scale, cur_level) && !isBatchedAtLevel(output_zero_point, cur_level)) {
6363     return at::_ops::_wrapped_quantized_linear_prepacked::call(input, input_scale, input_zero_point, packed_weight, output_scale, output_zero_point, out_channel);
6364   }
6365   auto [input_value, input_bdim] = unwrapTensorAtLevel(input, cur_level);
6366   auto [input_scale_value, input_scale_bdim] = unwrapTensorAtLevel(input_scale, cur_level);
6367   auto [input_zero_point_value, input_zero_point_bdim] = unwrapTensorAtLevel(input_zero_point, cur_level);
6368   auto [packed_weight_value, packed_weight_bdim] = unwrapTensorAtLevel(packed_weight, cur_level);
6369   auto [output_scale_value, output_scale_bdim] = unwrapTensorAtLevel(output_scale, cur_level);
6370   auto [output_zero_point_value, output_zero_point_bdim] = unwrapTensorAtLevel(output_zero_point, cur_level);
6371   auto results = batch_rule(input_value, input_bdim, input_scale_value, input_scale_bdim, input_zero_point_value, input_zero_point_bdim, packed_weight_value, packed_weight_bdim, output_scale_value, output_scale_bdim, output_zero_point_value, output_zero_point_bdim, out_channel);
6372   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
6373 }
6374 template <typename batch_rule_t, batch_rule_t batch_rule>
6375 at::Tensor fbgemm_linear_fp16_weight_fp32_activation_generated_plumbing(const at::Tensor & input, const at::Tensor & packed_weight, const at::Tensor & bias) {
6376   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
6377   auto maybe_layer = maybeCurrentDynamicLayer();
6378   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
6379   int64_t cur_level = maybe_layer->layerId();
6380   if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(packed_weight, cur_level) && !isBatchedAtLevel(bias, cur_level)) {
6381     return at::_ops::fbgemm_linear_fp16_weight_fp32_activation::call(input, packed_weight, bias);
6382   }
6383   auto [input_value, input_bdim] = unwrapTensorAtLevel(input, cur_level);
6384   auto [packed_weight_value, packed_weight_bdim] = unwrapTensorAtLevel(packed_weight, cur_level);
6385   auto [bias_value, bias_bdim] = unwrapTensorAtLevel(bias, cur_level);
6386   auto results = batch_rule(input_value, input_bdim, packed_weight_value, packed_weight_bdim, bias_value, bias_bdim);
6387   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
6388 }
6389 template <typename batch_rule_t, batch_rule_t batch_rule>
6390 at::Tensor fbgemm_linear_fp16_weight_generated_plumbing(const at::Tensor & input, const at::Tensor & packed_weight, const at::Tensor & bias) {
6391   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
6392   auto maybe_layer = maybeCurrentDynamicLayer();
6393   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
6394   int64_t cur_level = maybe_layer->layerId();
6395   if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(packed_weight, cur_level) && !isBatchedAtLevel(bias, cur_level)) {
6396     return at::_ops::fbgemm_linear_fp16_weight::call(input, packed_weight, bias);
6397   }
6398   auto [input_value, input_bdim] = unwrapTensorAtLevel(input, cur_level);
6399   auto [packed_weight_value, packed_weight_bdim] = unwrapTensorAtLevel(packed_weight, cur_level);
6400   auto [bias_value, bias_bdim] = unwrapTensorAtLevel(bias, cur_level);
6401   auto results = batch_rule(input_value, input_bdim, packed_weight_value, packed_weight_bdim, bias_value, bias_bdim);
6402   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
6403 }
6404 template <typename batch_rule_t, batch_rule_t batch_rule>
6405 at::Tensor fbgemm_pack_quantized_matrix_generated_plumbing(const at::Tensor & input) {
6406   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
6407   auto maybe_layer = maybeCurrentDynamicLayer();
6408   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
6409   int64_t cur_level = maybe_layer->layerId();
6410   if (!isBatchedAtLevel(input, cur_level)) {
6411     return at::_ops::fbgemm_pack_quantized_matrix::call(input);
6412   }
6413   auto [input_value, input_bdim] = unwrapTensorAtLevel(input, cur_level);
6414   auto results = batch_rule(input_value, input_bdim);
6415   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
6416 }
6417 template <typename batch_rule_t, batch_rule_t batch_rule>
6418 at::Tensor fbgemm_pack_quantized_matrix_KN_generated_plumbing(const at::Tensor & input, int64_t K, int64_t N) {
6419   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
6420   auto maybe_layer = maybeCurrentDynamicLayer();
6421   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
6422   int64_t cur_level = maybe_layer->layerId();
6423   if (!isBatchedAtLevel(input, cur_level)) {
6424     return at::_ops::fbgemm_pack_quantized_matrix_KN::call(input, K, N);
6425   }
6426   auto [input_value, input_bdim] = unwrapTensorAtLevel(input, cur_level);
6427   auto results = batch_rule(input_value, input_bdim, K, N);
6428   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
6429 }
6430 template <typename batch_rule_t, batch_rule_t batch_rule>
6431 at::Tensor ldexp_Tensor_generated_plumbing(const at::Tensor & self, const at::Tensor & other) {
6432   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
6433   auto maybe_layer = maybeCurrentDynamicLayer();
6434   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
6435   int64_t cur_level = maybe_layer->layerId();
6436   if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
6437     return at::_ops::ldexp_Tensor::call(self, other);
6438   }
6439   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
6440   auto [other_value, other_bdim] = unwrapTensorAtLevel(other, cur_level);
6441   auto results = batch_rule(self_value, self_bdim, other_value, other_bdim);
6442   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
6443 }
6444 template <typename batch_rule_t, batch_rule_t batch_rule>
6445 at::Tensor & ldexp__generated_plumbing(at::Tensor & self, const at::Tensor & other) {
6446   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
6447   auto maybe_layer = maybeCurrentDynamicLayer();
6448   vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
6449   int64_t cur_level = maybe_layer->layerId();
6450   if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
6451     return at::_ops::ldexp_::call(self, other);
6452   }
6453   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
6454   auto [other_value, other_bdim] = unwrapTensorAtLevel(other, cur_level);
6455   batch_rule(self_value, self_bdim, other_value, other_bdim);
6456   return self;
6457 }
6458 template <typename batch_rule_t, batch_rule_t batch_rule>
6459 at::Tensor linspace_Tensor_Tensor_generated_plumbing(const at::Tensor & start, const at::Tensor & end, int64_t steps, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory) {
6460   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
6461   auto maybe_layer = maybeCurrentDynamicLayer();
6462   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
6463   int64_t cur_level = maybe_layer->layerId();
6464   if (!isBatchedAtLevel(start, cur_level) && !isBatchedAtLevel(end, cur_level)) {
6465     return at::_ops::linspace_Tensor_Tensor::call(start, end, steps, dtype, layout, device, pin_memory);
6466   }
6467   auto [start_value, start_bdim] = unwrapTensorAtLevel(start, cur_level);
6468   auto [end_value, end_bdim] = unwrapTensorAtLevel(end, cur_level);
6469   auto results = batch_rule(start_value, start_bdim, end_value, end_bdim, steps, dtype, layout, device, pin_memory);
6470   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
6471 }
6472 template <typename batch_rule_t, batch_rule_t batch_rule>
6473 at::Tensor linspace_Tensor_Scalar_generated_plumbing(const at::Tensor & start, const at::Scalar & end, int64_t steps, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory) {
6474   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
6475   auto maybe_layer = maybeCurrentDynamicLayer();
6476   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
6477   int64_t cur_level = maybe_layer->layerId();
6478   if (!isBatchedAtLevel(start, cur_level)) {
6479     return at::_ops::linspace_Tensor_Scalar::call(start, end, steps, dtype, layout, device, pin_memory);
6480   }
6481   auto [start_value, start_bdim] = unwrapTensorAtLevel(start, cur_level);
6482   auto results = batch_rule(start_value, start_bdim, end, steps, dtype, layout, device, pin_memory);
6483   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
6484 }
6485 template <typename batch_rule_t, batch_rule_t batch_rule>
6486 at::Tensor linspace_Scalar_Tensor_generated_plumbing(const at::Scalar & start, const at::Tensor & end, int64_t steps, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory) {
6487   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
6488   auto maybe_layer = maybeCurrentDynamicLayer();
6489   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
6490   int64_t cur_level = maybe_layer->layerId();
6491   if (!isBatchedAtLevel(end, cur_level)) {
6492     return at::_ops::linspace_Scalar_Tensor::call(start, end, steps, dtype, layout, device, pin_memory);
6493   }
6494   auto [end_value, end_bdim] = unwrapTensorAtLevel(end, cur_level);
6495   auto results = batch_rule(start, end_value, end_bdim, steps, dtype, layout, device, pin_memory);
6496   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
6497 }
6498 template <typename batch_rule_t, batch_rule_t batch_rule>
6499 at::Tensor log_generated_plumbing(const at::Tensor & self) {
6500   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
6501   auto maybe_layer = maybeCurrentDynamicLayer();
6502   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
6503   int64_t cur_level = maybe_layer->layerId();
6504   if (!isBatchedAtLevel(self, cur_level)) {
6505     return at::_ops::log::call(self);
6506   }
6507   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
6508   auto results = batch_rule(self_value, self_bdim);
6509   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
6510 }
6511 template <typename batch_rule_t, batch_rule_t batch_rule>
6512 at::Tensor & log__generated_plumbing(at::Tensor & self) {
6513   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
6514   auto maybe_layer = maybeCurrentDynamicLayer();
6515   vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
6516   int64_t cur_level = maybe_layer->layerId();
6517   if (!isBatchedAtLevel(self, cur_level)) {
6518     return at::_ops::log_::call(self);
6519   }
6520   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
6521   batch_rule(self_value, self_bdim);
6522   return self;
6523 }
6524 template <typename batch_rule_t, batch_rule_t batch_rule>
6525 at::Tensor log10_generated_plumbing(const at::Tensor & self) {
6526   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
6527   auto maybe_layer = maybeCurrentDynamicLayer();
6528   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
6529   int64_t cur_level = maybe_layer->layerId();
6530   if (!isBatchedAtLevel(self, cur_level)) {
6531     return at::_ops::log10::call(self);
6532   }
6533   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
6534   auto results = batch_rule(self_value, self_bdim);
6535   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
6536 }
6537 template <typename batch_rule_t, batch_rule_t batch_rule>
6538 at::Tensor & log10__generated_plumbing(at::Tensor & self) {
6539   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
6540   auto maybe_layer = maybeCurrentDynamicLayer();
6541   vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
6542   int64_t cur_level = maybe_layer->layerId();
6543   if (!isBatchedAtLevel(self, cur_level)) {
6544     return at::_ops::log10_::call(self);
6545   }
6546   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
6547   batch_rule(self_value, self_bdim);
6548   return self;
6549 }
6550 template <typename batch_rule_t, batch_rule_t batch_rule>
6551 at::Tensor log1p_generated_plumbing(const at::Tensor & self) {
6552   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
6553   auto maybe_layer = maybeCurrentDynamicLayer();
6554   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
6555   int64_t cur_level = maybe_layer->layerId();
6556   if (!isBatchedAtLevel(self, cur_level)) {
6557     return at::_ops::log1p::call(self);
6558   }
6559   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
6560   auto results = batch_rule(self_value, self_bdim);
6561   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
6562 }
6563 template <typename batch_rule_t, batch_rule_t batch_rule>
6564 at::Tensor & log1p__generated_plumbing(at::Tensor & self) {
6565   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
6566   auto maybe_layer = maybeCurrentDynamicLayer();
6567   vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
6568   int64_t cur_level = maybe_layer->layerId();
6569   if (!isBatchedAtLevel(self, cur_level)) {
6570     return at::_ops::log1p_::call(self);
6571   }
6572   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
6573   batch_rule(self_value, self_bdim);
6574   return self;
6575 }
6576 template <typename batch_rule_t, batch_rule_t batch_rule>
6577 at::Tensor log2_generated_plumbing(const at::Tensor & self) {
6578   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
6579   auto maybe_layer = maybeCurrentDynamicLayer();
6580   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
6581   int64_t cur_level = maybe_layer->layerId();
6582   if (!isBatchedAtLevel(self, cur_level)) {
6583     return at::_ops::log2::call(self);
6584   }
6585   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
6586   auto results = batch_rule(self_value, self_bdim);
6587   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
6588 }
6589 template <typename batch_rule_t, batch_rule_t batch_rule>
6590 at::Tensor & log2__generated_plumbing(at::Tensor & self) {
6591   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
6592   auto maybe_layer = maybeCurrentDynamicLayer();
6593   vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
6594   int64_t cur_level = maybe_layer->layerId();
6595   if (!isBatchedAtLevel(self, cur_level)) {
6596     return at::_ops::log2_::call(self);
6597   }
6598   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
6599   batch_rule(self_value, self_bdim);
6600   return self;
6601 }
6602 template <typename batch_rule_t, batch_rule_t batch_rule>
6603 at::Tensor logaddexp_generated_plumbing(const at::Tensor & self, const at::Tensor & other) {
6604   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
6605   auto maybe_layer = maybeCurrentDynamicLayer();
6606   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
6607   int64_t cur_level = maybe_layer->layerId();
6608   if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
6609     return at::_ops::logaddexp::call(self, other);
6610   }
6611   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
6612   auto [other_value, other_bdim] = unwrapTensorAtLevel(other, cur_level);
6613   auto results = batch_rule(self_value, self_bdim, other_value, other_bdim);
6614   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
6615 }
6616 template <typename batch_rule_t, batch_rule_t batch_rule>
6617 at::Tensor logaddexp2_generated_plumbing(const at::Tensor & self, const at::Tensor & other) {
6618   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
6619   auto maybe_layer = maybeCurrentDynamicLayer();
6620   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
6621   int64_t cur_level = maybe_layer->layerId();
6622   if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
6623     return at::_ops::logaddexp2::call(self, other);
6624   }
6625   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
6626   auto [other_value, other_bdim] = unwrapTensorAtLevel(other, cur_level);
6627   auto results = batch_rule(self_value, self_bdim, other_value, other_bdim);
6628   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
6629 }
6630 template <typename batch_rule_t, batch_rule_t batch_rule>
6631 at::Tensor xlogy_Tensor_generated_plumbing(const at::Tensor & self, const at::Tensor & other) {
6632   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
6633   auto maybe_layer = maybeCurrentDynamicLayer();
6634   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
6635   int64_t cur_level = maybe_layer->layerId();
6636   if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
6637     return at::_ops::xlogy_Tensor::call(self, other);
6638   }
6639   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
6640   auto [other_value, other_bdim] = unwrapTensorAtLevel(other, cur_level);
6641   auto results = batch_rule(self_value, self_bdim, other_value, other_bdim);
6642   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
6643 }
6644 template <typename batch_rule_t, batch_rule_t batch_rule>
6645 at::Tensor xlogy_Scalar_Self_generated_plumbing(const at::Scalar & self, const at::Tensor & other) {
6646   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
6647   auto maybe_layer = maybeCurrentDynamicLayer();
6648   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
6649   int64_t cur_level = maybe_layer->layerId();
6650   if (!isBatchedAtLevel(other, cur_level)) {
6651     return at::_ops::xlogy_Scalar_Self::call(self, other);
6652   }
6653   auto [other_value, other_bdim] = unwrapTensorAtLevel(other, cur_level);
6654   auto results = batch_rule(self, other_value, other_bdim);
6655   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
6656 }
6657 template <typename batch_rule_t, batch_rule_t batch_rule>
6658 at::Tensor xlogy_Scalar_Other_generated_plumbing(const at::Tensor & self, const at::Scalar & other) {
6659   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
6660   auto maybe_layer = maybeCurrentDynamicLayer();
6661   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
6662   int64_t cur_level = maybe_layer->layerId();
6663   if (!isBatchedAtLevel(self, cur_level)) {
6664     return at::_ops::xlogy_Scalar_Other::call(self, other);
6665   }
6666   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
6667   auto results = batch_rule(self_value, self_bdim, other);
6668   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
6669 }
6670 template <typename batch_rule_t, batch_rule_t batch_rule>
6671 at::Tensor & xlogy__Tensor_generated_plumbing(at::Tensor & self, const at::Tensor & other) {
6672   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
6673   auto maybe_layer = maybeCurrentDynamicLayer();
6674   vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
6675   int64_t cur_level = maybe_layer->layerId();
6676   if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
6677     return at::_ops::xlogy__Tensor::call(self, other);
6678   }
6679   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
6680   auto [other_value, other_bdim] = unwrapTensorAtLevel(other, cur_level);
6681   batch_rule(self_value, self_bdim, other_value, other_bdim);
6682   return self;
6683 }
6684 template <typename batch_rule_t, batch_rule_t batch_rule>
6685 at::Tensor & xlogy__Scalar_Other_generated_plumbing(at::Tensor & self, const at::Scalar & other) {
6686   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
6687   auto maybe_layer = maybeCurrentDynamicLayer();
6688   vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
6689   int64_t cur_level = maybe_layer->layerId();
6690   if (!isBatchedAtLevel(self, cur_level)) {
6691     return at::_ops::xlogy__Scalar_Other::call(self, other);
6692   }
6693   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
6694   batch_rule(self_value, self_bdim, other);
6695   return self;
6696 }
6697 template <typename batch_rule_t, batch_rule_t batch_rule>
6698 at::Tensor logspace_Tensor_Tensor_generated_plumbing(const at::Tensor & start, const at::Tensor & end, int64_t steps, double base, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory) {
6699   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
6700   auto maybe_layer = maybeCurrentDynamicLayer();
6701   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
6702   int64_t cur_level = maybe_layer->layerId();
6703   if (!isBatchedAtLevel(start, cur_level) && !isBatchedAtLevel(end, cur_level)) {
6704     return at::_ops::logspace_Tensor_Tensor::call(start, end, steps, base, dtype, layout, device, pin_memory);
6705   }
6706   auto [start_value, start_bdim] = unwrapTensorAtLevel(start, cur_level);
6707   auto [end_value, end_bdim] = unwrapTensorAtLevel(end, cur_level);
6708   auto results = batch_rule(start_value, start_bdim, end_value, end_bdim, steps, base, dtype, layout, device, pin_memory);
6709   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
6710 }
6711 template <typename batch_rule_t, batch_rule_t batch_rule>
6712 at::Tensor logspace_Tensor_Scalar_generated_plumbing(const at::Tensor & start, const at::Scalar & end, int64_t steps, double base, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory) {
6713   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
6714   auto maybe_layer = maybeCurrentDynamicLayer();
6715   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
6716   int64_t cur_level = maybe_layer->layerId();
6717   if (!isBatchedAtLevel(start, cur_level)) {
6718     return at::_ops::logspace_Tensor_Scalar::call(start, end, steps, base, dtype, layout, device, pin_memory);
6719   }
6720   auto [start_value, start_bdim] = unwrapTensorAtLevel(start, cur_level);
6721   auto results = batch_rule(start_value, start_bdim, end, steps, base, dtype, layout, device, pin_memory);
6722   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
6723 }
6724 template <typename batch_rule_t, batch_rule_t batch_rule>
6725 at::Tensor logspace_Scalar_Tensor_generated_plumbing(const at::Scalar & start, const at::Tensor & end, int64_t steps, double base, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory) {
6726   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
6727   auto maybe_layer = maybeCurrentDynamicLayer();
6728   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
6729   int64_t cur_level = maybe_layer->layerId();
6730   if (!isBatchedAtLevel(end, cur_level)) {
6731     return at::_ops::logspace_Scalar_Tensor::call(start, end, steps, base, dtype, layout, device, pin_memory);
6732   }
6733   auto [end_value, end_bdim] = unwrapTensorAtLevel(end, cur_level);
6734   auto results = batch_rule(start, end_value, end_bdim, steps, base, dtype, layout, device, pin_memory);
6735   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
6736 }
6737 template <typename batch_rule_t, batch_rule_t batch_rule>
6738 at::Tensor log_softmax_int_generated_plumbing(const at::Tensor & self, int64_t dim, ::std::optional<at::ScalarType> dtype) {
6739   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
6740   auto maybe_layer = maybeCurrentDynamicLayer();
6741   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
6742   int64_t cur_level = maybe_layer->layerId();
6743   if (!isBatchedAtLevel(self, cur_level)) {
6744     return at::_ops::log_softmax_int::call(self, dim, dtype);
6745   }
6746   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
6747   auto results = batch_rule(self_value, self_bdim, dim, dtype);
6748   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
6749 }
6750 template <typename batch_rule_t, batch_rule_t batch_rule>
6751 at::Tensor log_softmax_Dimname_generated_plumbing(const at::Tensor & self, at::Dimname dim, ::std::optional<at::ScalarType> dtype) {
6752   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
6753   auto maybe_layer = maybeCurrentDynamicLayer();
6754   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
6755   int64_t cur_level = maybe_layer->layerId();
6756   if (!isBatchedAtLevel(self, cur_level)) {
6757     return at::_ops::log_softmax_Dimname::call(self, dim, dtype);
6758   }
6759   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
6760   auto results = batch_rule(self_value, self_bdim, dim, dtype);
6761   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
6762 }
6763 template <typename batch_rule_t, batch_rule_t batch_rule>
6764 at::Tensor _log_softmax_generated_plumbing(const at::Tensor & self, int64_t dim, bool half_to_float) {
6765   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
6766   auto maybe_layer = maybeCurrentDynamicLayer();
6767   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
6768   int64_t cur_level = maybe_layer->layerId();
6769   if (!isBatchedAtLevel(self, cur_level)) {
6770     return at::_ops::_log_softmax::call(self, dim, half_to_float);
6771   }
6772   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
6773   auto results = batch_rule(self_value, self_bdim, dim, half_to_float);
6774   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
6775 }
6776 template <typename batch_rule_t, batch_rule_t batch_rule>
6777 at::Tensor _log_softmax_backward_data_generated_plumbing(const at::Tensor & grad_output, const at::Tensor & output, int64_t dim, at::ScalarType input_dtype) {
6778   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
6779   auto maybe_layer = maybeCurrentDynamicLayer();
6780   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
6781   int64_t cur_level = maybe_layer->layerId();
6782   if (!isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(output, cur_level)) {
6783     return at::_ops::_log_softmax_backward_data::call(grad_output, output, dim, input_dtype);
6784   }
6785   auto [grad_output_value, grad_output_bdim] = unwrapTensorAtLevel(grad_output, cur_level);
6786   auto [output_value, output_bdim] = unwrapTensorAtLevel(output, cur_level);
6787   auto results = batch_rule(grad_output_value, grad_output_bdim, output_value, output_bdim, dim, input_dtype);
6788   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
6789 }
6790 template <typename batch_rule_t, batch_rule_t batch_rule>
6791 at::Tensor _logcumsumexp_generated_plumbing(const at::Tensor & self, int64_t dim) {
6792   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
6793   auto maybe_layer = maybeCurrentDynamicLayer();
6794   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
6795   int64_t cur_level = maybe_layer->layerId();
6796   if (!isBatchedAtLevel(self, cur_level)) {
6797     return at::_ops::_logcumsumexp::call(self, dim);
6798   }
6799   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
6800   auto results = batch_rule(self_value, self_bdim, dim);
6801   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
6802 }
6803 template <typename batch_rule_t, batch_rule_t batch_rule>
6804 at::Tensor logcumsumexp_generated_plumbing(const at::Tensor & self, int64_t dim) {
6805   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
6806   auto maybe_layer = maybeCurrentDynamicLayer();
6807   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
6808   int64_t cur_level = maybe_layer->layerId();
6809   if (!isBatchedAtLevel(self, cur_level)) {
6810     return at::_ops::logcumsumexp::call(self, dim);
6811   }
6812   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
6813   auto results = batch_rule(self_value, self_bdim, dim);
6814   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
6815 }
6816 template <typename batch_rule_t, batch_rule_t batch_rule>
6817 at::Tensor logcumsumexp_dimname_generated_plumbing(const at::Tensor & self, at::Dimname dim) {
6818   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
6819   auto maybe_layer = maybeCurrentDynamicLayer();
6820   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
6821   int64_t cur_level = maybe_layer->layerId();
6822   if (!isBatchedAtLevel(self, cur_level)) {
6823     return at::_ops::logcumsumexp_dimname::call(self, dim);
6824   }
6825   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
6826   auto results = batch_rule(self_value, self_bdim, dim);
6827   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
6828 }
6829 template <typename batch_rule_t, batch_rule_t batch_rule>
6830 at::Tensor logsumexp_generated_plumbing(const at::Tensor & self, at::IntArrayRef dim, bool keepdim) {
6831   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
6832   auto maybe_layer = maybeCurrentDynamicLayer();
6833   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
6834   int64_t cur_level = maybe_layer->layerId();
6835   if (!isBatchedAtLevel(self, cur_level)) {
6836     return at::_ops::logsumexp::call(self, dim, keepdim);
6837   }
6838   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
6839   auto results = batch_rule(self_value, self_bdim, dim, keepdim);
6840   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
6841 }
6842 template <typename batch_rule_t, batch_rule_t batch_rule>
6843 at::Tensor logsumexp_names_generated_plumbing(const at::Tensor & self, at::DimnameList dim, bool keepdim) {
6844   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
6845   auto maybe_layer = maybeCurrentDynamicLayer();
6846   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
6847   int64_t cur_level = maybe_layer->layerId();
6848   if (!isBatchedAtLevel(self, cur_level)) {
6849     return at::_ops::logsumexp_names::call(self, dim, keepdim);
6850   }
6851   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
6852   auto results = batch_rule(self_value, self_bdim, dim, keepdim);
6853   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
6854 }
6855 template <typename batch_rule_t, batch_rule_t batch_rule>
6856 at::Tensor margin_ranking_loss_generated_plumbing(const at::Tensor & input1, const at::Tensor & input2, const at::Tensor & target, double margin, int64_t reduction) {
6857   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
6858   auto maybe_layer = maybeCurrentDynamicLayer();
6859   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
6860   int64_t cur_level = maybe_layer->layerId();
6861   if (!isBatchedAtLevel(input1, cur_level) && !isBatchedAtLevel(input2, cur_level) && !isBatchedAtLevel(target, cur_level)) {
6862     return at::_ops::margin_ranking_loss::call(input1, input2, target, margin, reduction);
6863   }
6864   auto [input1_value, input1_bdim] = unwrapTensorAtLevel(input1, cur_level);
6865   auto [input2_value, input2_bdim] = unwrapTensorAtLevel(input2, cur_level);
6866   auto [target_value, target_bdim] = unwrapTensorAtLevel(target, cur_level);
6867   auto results = batch_rule(input1_value, input1_bdim, input2_value, input2_bdim, target_value, target_bdim, margin, reduction);
6868   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
6869 }
6870 template <typename batch_rule_t, batch_rule_t batch_rule>
6871 at::Tensor matmul_generated_plumbing(const at::Tensor & self, const at::Tensor & other) {
6872   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
6873   auto maybe_layer = maybeCurrentDynamicLayer();
6874   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
6875   int64_t cur_level = maybe_layer->layerId();
6876   if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
6877     return at::_ops::matmul::call(self, other);
6878   }
6879   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
6880   auto [other_value, other_bdim] = unwrapTensorAtLevel(other, cur_level);
6881   auto results = batch_rule(self_value, self_bdim, other_value, other_bdim);
6882   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
6883 }
6884 template <typename batch_rule_t, batch_rule_t batch_rule>
6885 ::std::tuple<at::Tensor,at::Tensor> matmul_backward_generated_plumbing(const at::Tensor & grad, const at::Tensor & self, const at::Tensor & other, ::std::array<bool,2> mask) {
6886   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
6887   auto maybe_layer = maybeCurrentDynamicLayer();
6888   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
6889   int64_t cur_level = maybe_layer->layerId();
6890   if (!isBatchedAtLevel(grad, cur_level) && !isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
6891     return at::_ops::matmul_backward::call(grad, self, other, mask);
6892   }
6893   auto [grad_value, grad_bdim] = unwrapTensorAtLevel(grad, cur_level);
6894   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
6895   auto [other_value, other_bdim] = unwrapTensorAtLevel(other, cur_level);
6896   auto results = batch_rule(grad_value, grad_bdim, self_value, self_bdim, other_value, other_bdim, mask);
6897   return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
6898 }
6899 template <typename batch_rule_t, batch_rule_t batch_rule>
6900 at::Tensor matrix_power_generated_plumbing(const at::Tensor & self, int64_t n) {
6901   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
6902   auto maybe_layer = maybeCurrentDynamicLayer();
6903   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
6904   int64_t cur_level = maybe_layer->layerId();
6905   if (!isBatchedAtLevel(self, cur_level)) {
6906     return at::_ops::matrix_power::call(self, n);
6907   }
6908   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
6909   auto results = batch_rule(self_value, self_bdim, n);
6910   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
6911 }
6912 template <typename batch_rule_t, batch_rule_t batch_rule>
6913 at::Tensor matrix_exp_generated_plumbing(const at::Tensor & self) {
6914   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
6915   auto maybe_layer = maybeCurrentDynamicLayer();
6916   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
6917   int64_t cur_level = maybe_layer->layerId();
6918   if (!isBatchedAtLevel(self, cur_level)) {
6919     return at::_ops::matrix_exp::call(self);
6920   }
6921   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
6922   auto results = batch_rule(self_value, self_bdim);
6923   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
6924 }
6925 template <typename batch_rule_t, batch_rule_t batch_rule>
6926 at::Tensor matrix_exp_backward_generated_plumbing(const at::Tensor & self, const at::Tensor & grad) {
6927   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
6928   auto maybe_layer = maybeCurrentDynamicLayer();
6929   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
6930   int64_t cur_level = maybe_layer->layerId();
6931   if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(grad, cur_level)) {
6932     return at::_ops::matrix_exp_backward::call(self, grad);
6933   }
6934   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
6935   auto [grad_value, grad_bdim] = unwrapTensorAtLevel(grad, cur_level);
6936   auto results = batch_rule(self_value, self_bdim, grad_value, grad_bdim);
6937   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
6938 }
6939 template <typename batch_rule_t, batch_rule_t batch_rule>
6940 ::std::tuple<at::Tensor,at::Tensor> _aminmax_generated_plumbing(const at::Tensor & self) {
6941   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
6942   auto maybe_layer = maybeCurrentDynamicLayer();
6943   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
6944   int64_t cur_level = maybe_layer->layerId();
6945   if (!isBatchedAtLevel(self, cur_level)) {
6946     return at::_ops::_aminmax::call(self);
6947   }
6948   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
6949   auto results = batch_rule(self_value, self_bdim);
6950   return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
6951 }
6952 template <typename batch_rule_t, batch_rule_t batch_rule>
6953 ::std::tuple<at::Tensor,at::Tensor> _aminmax_dim_generated_plumbing(const at::Tensor & self, int64_t dim, bool keepdim) {
6954   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
6955   auto maybe_layer = maybeCurrentDynamicLayer();
6956   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
6957   int64_t cur_level = maybe_layer->layerId();
6958   if (!isBatchedAtLevel(self, cur_level)) {
6959     return at::_ops::_aminmax_dim::call(self, dim, keepdim);
6960   }
6961   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
6962   auto results = batch_rule(self_value, self_bdim, dim, keepdim);
6963   return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
6964 }
6965 template <typename batch_rule_t, batch_rule_t batch_rule>
6966 ::std::tuple<at::Tensor,at::Tensor> aminmax_generated_plumbing(const at::Tensor & self, ::std::optional<int64_t> dim, bool keepdim) {
6967   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
6968   auto maybe_layer = maybeCurrentDynamicLayer();
6969   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
6970   int64_t cur_level = maybe_layer->layerId();
6971   if (!isBatchedAtLevel(self, cur_level)) {
6972     return at::_ops::aminmax::call(self, dim, keepdim);
6973   }
6974   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
6975   auto results = batch_rule(self_value, self_bdim, dim, keepdim);
6976   return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
6977 }
6978 template <typename batch_rule_t, batch_rule_t batch_rule>
6979 at::Tensor _compute_linear_combination_generated_plumbing(const at::Tensor & input, const at::Tensor & coefficients) {
6980   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
6981   auto maybe_layer = maybeCurrentDynamicLayer();
6982   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
6983   int64_t cur_level = maybe_layer->layerId();
6984   if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(coefficients, cur_level)) {
6985     return at::_ops::_compute_linear_combination::call(input, coefficients);
6986   }
6987   auto [input_value, input_bdim] = unwrapTensorAtLevel(input, cur_level);
6988   auto [coefficients_value, coefficients_bdim] = unwrapTensorAtLevel(coefficients, cur_level);
6989   auto results = batch_rule(input_value, input_bdim, coefficients_value, coefficients_bdim);
6990   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
6991 }
6992 template <typename batch_rule_t, batch_rule_t batch_rule>
6993 ::std::tuple<at::Tensor,at::Tensor> max_dim_generated_plumbing(const at::Tensor & self, int64_t dim, bool keepdim) {
6994   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
6995   auto maybe_layer = maybeCurrentDynamicLayer();
6996   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
6997   int64_t cur_level = maybe_layer->layerId();
6998   if (!isBatchedAtLevel(self, cur_level)) {
6999     return at::_ops::max_dim::call(self, dim, keepdim);
7000   }
7001   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
7002   auto results = batch_rule(self_value, self_bdim, dim, keepdim);
7003   return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
7004 }
7005 template <typename batch_rule_t, batch_rule_t batch_rule>
7006 ::std::tuple<at::Tensor,at::Tensor> max_names_dim_generated_plumbing(const at::Tensor & self, at::Dimname dim, bool keepdim) {
7007   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
7008   auto maybe_layer = maybeCurrentDynamicLayer();
7009   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
7010   int64_t cur_level = maybe_layer->layerId();
7011   if (!isBatchedAtLevel(self, cur_level)) {
7012     return at::_ops::max_names_dim::call(self, dim, keepdim);
7013   }
7014   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
7015   auto results = batch_rule(self_value, self_bdim, dim, keepdim);
7016   return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
7017 }
7018 template <typename batch_rule_t, batch_rule_t batch_rule>
7019 at::Tensor value_selecting_reduction_backward_generated_plumbing(const at::Tensor & grad, int64_t dim, const at::Tensor & indices, c10::SymIntArrayRef sizes, bool keepdim) {
7020   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
7021   auto maybe_layer = maybeCurrentDynamicLayer();
7022   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
7023   int64_t cur_level = maybe_layer->layerId();
7024   if (!isBatchedAtLevel(grad, cur_level) && !isBatchedAtLevel(indices, cur_level)) {
7025     return at::_ops::value_selecting_reduction_backward::call(grad, dim, indices, sizes, keepdim);
7026   }
7027   auto [grad_value, grad_bdim] = unwrapTensorAtLevel(grad, cur_level);
7028   auto [indices_value, indices_bdim] = unwrapTensorAtLevel(indices, cur_level);
7029   auto results = batch_rule(grad_value, grad_bdim, dim, indices_value, indices_bdim, sizes, keepdim);
7030   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
7031 }
7032 template <typename batch_rule_t, batch_rule_t batch_rule>
7033 at::Tensor amax_generated_plumbing(const at::Tensor & self, at::IntArrayRef dim, bool keepdim) {
7034   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
7035   auto maybe_layer = maybeCurrentDynamicLayer();
7036   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
7037   int64_t cur_level = maybe_layer->layerId();
7038   if (!isBatchedAtLevel(self, cur_level)) {
7039     return at::_ops::amax::call(self, dim, keepdim);
7040   }
7041   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
7042   auto results = batch_rule(self_value, self_bdim, dim, keepdim);
7043   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
7044 }
7045 template <typename batch_rule_t, batch_rule_t batch_rule>
7046 ::std::tuple<at::Tensor,at::Tensor> max_pool1d_with_indices_generated_plumbing(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode) {
7047   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
7048   auto maybe_layer = maybeCurrentDynamicLayer();
7049   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
7050   int64_t cur_level = maybe_layer->layerId();
7051   if (!isBatchedAtLevel(self, cur_level)) {
7052     return at::_ops::max_pool1d_with_indices::call(self, kernel_size, stride, padding, dilation, ceil_mode);
7053   }
7054   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
7055   auto results = batch_rule(self_value, self_bdim, kernel_size, stride, padding, dilation, ceil_mode);
7056   return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
7057 }
7058 template <typename batch_rule_t, batch_rule_t batch_rule>
7059 at::Tensor max_pool1d_generated_plumbing(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode) {
7060   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
7061   auto maybe_layer = maybeCurrentDynamicLayer();
7062   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
7063   int64_t cur_level = maybe_layer->layerId();
7064   if (!isBatchedAtLevel(self, cur_level)) {
7065     return at::_ops::max_pool1d::call(self, kernel_size, stride, padding, dilation, ceil_mode);
7066   }
7067   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
7068   auto results = batch_rule(self_value, self_bdim, kernel_size, stride, padding, dilation, ceil_mode);
7069   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
7070 }
7071 template <typename batch_rule_t, batch_rule_t batch_rule>
7072 at::Tensor max_pool2d_generated_plumbing(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode) {
7073   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
7074   auto maybe_layer = maybeCurrentDynamicLayer();
7075   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
7076   int64_t cur_level = maybe_layer->layerId();
7077   if (!isBatchedAtLevel(self, cur_level)) {
7078     return at::_ops::max_pool2d::call(self, kernel_size, stride, padding, dilation, ceil_mode);
7079   }
7080   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
7081   auto results = batch_rule(self_value, self_bdim, kernel_size, stride, padding, dilation, ceil_mode);
7082   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
7083 }
7084 template <typename batch_rule_t, batch_rule_t batch_rule>
7085 at::Tensor max_pool2d_backward_generated_plumbing(const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode) {
7086   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
7087   auto maybe_layer = maybeCurrentDynamicLayer();
7088   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
7089   int64_t cur_level = maybe_layer->layerId();
7090   if (!isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(self, cur_level)) {
7091     return at::_ops::max_pool2d_backward::call(grad_output, self, kernel_size, stride, padding, dilation, ceil_mode);
7092   }
7093   auto [grad_output_value, grad_output_bdim] = unwrapTensorAtLevel(grad_output, cur_level);
7094   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
7095   auto results = batch_rule(grad_output_value, grad_output_bdim, self_value, self_bdim, kernel_size, stride, padding, dilation, ceil_mode);
7096   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
7097 }
7098 template <typename batch_rule_t, batch_rule_t batch_rule>
7099 at::Tensor mkldnn_max_pool2d_generated_plumbing(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode) {
7100   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
7101   auto maybe_layer = maybeCurrentDynamicLayer();
7102   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
7103   int64_t cur_level = maybe_layer->layerId();
7104   if (!isBatchedAtLevel(self, cur_level)) {
7105     return at::_ops::mkldnn_max_pool2d::call(self, kernel_size, stride, padding, dilation, ceil_mode);
7106   }
7107   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
7108   auto results = batch_rule(self_value, self_bdim, kernel_size, stride, padding, dilation, ceil_mode);
7109   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
7110 }
7111 template <typename batch_rule_t, batch_rule_t batch_rule>
7112 at::Tensor mkldnn_max_pool2d_backward_generated_plumbing(const at::Tensor & grad_output, const at::Tensor & output, const at::Tensor & input, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode) {
7113   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
7114   auto maybe_layer = maybeCurrentDynamicLayer();
7115   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
7116   int64_t cur_level = maybe_layer->layerId();
7117   if (!isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(output, cur_level) && !isBatchedAtLevel(input, cur_level)) {
7118     return at::_ops::mkldnn_max_pool2d_backward::call(grad_output, output, input, kernel_size, stride, padding, dilation, ceil_mode);
7119   }
7120   auto [grad_output_value, grad_output_bdim] = unwrapTensorAtLevel(grad_output, cur_level);
7121   auto [output_value, output_bdim] = unwrapTensorAtLevel(output, cur_level);
7122   auto [input_value, input_bdim] = unwrapTensorAtLevel(input, cur_level);
7123   auto results = batch_rule(grad_output_value, grad_output_bdim, output_value, output_bdim, input_value, input_bdim, kernel_size, stride, padding, dilation, ceil_mode);
7124   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
7125 }
7126 template <typename batch_rule_t, batch_rule_t batch_rule>
7127 at::Tensor mkldnn_max_pool3d_generated_plumbing(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode) {
7128   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
7129   auto maybe_layer = maybeCurrentDynamicLayer();
7130   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
7131   int64_t cur_level = maybe_layer->layerId();
7132   if (!isBatchedAtLevel(self, cur_level)) {
7133     return at::_ops::mkldnn_max_pool3d::call(self, kernel_size, stride, padding, dilation, ceil_mode);
7134   }
7135   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
7136   auto results = batch_rule(self_value, self_bdim, kernel_size, stride, padding, dilation, ceil_mode);
7137   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
7138 }
// Generated vmap plumbing for at::_ops::mkldnn_max_pool3d_backward.
// Fast path: if none of grad_output/output/input is batched at the current vmap
// level, dispatch straight to the plain op. Otherwise unwrap each tensor into a
// (value, bdim) pair, invoke batch_rule, and re-wrap the result via makeBatched.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor mkldnn_max_pool3d_backward_generated_plumbing(const at::Tensor & grad_output, const at::Tensor & output, const at::Tensor & input, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode) {
  // Keep the batched key excluded so nested dispatch doesn't re-enter this plumbing.
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(output, cur_level) && !isBatchedAtLevel(input, cur_level)) {
    return at::_ops::mkldnn_max_pool3d_backward::call(grad_output, output, input, kernel_size, stride, padding, dilation, ceil_mode);
  }
  auto [grad_output_value, grad_output_bdim] = unwrapTensorAtLevel(grad_output, cur_level);
  auto [output_value, output_bdim] = unwrapTensorAtLevel(output, cur_level);
  auto [input_value, input_bdim] = unwrapTensorAtLevel(input, cur_level);
  auto results = batch_rule(grad_output_value, grad_output_bdim, output_value, output_bdim, input_value, input_bdim, kernel_size, stride, padding, dilation, ceil_mode);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// Generated vmap plumbing for at::_ops::quantized_max_pool1d: falls through to
// the plain op when self is not batched at this level; otherwise unwraps self,
// applies batch_rule, and re-wraps the result.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor quantized_max_pool1d_generated_plumbing(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::quantized_max_pool1d::call(self, kernel_size, stride, padding, dilation, ceil_mode);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, kernel_size, stride, padding, dilation, ceil_mode);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// Generated vmap plumbing for at::_ops::quantized_max_pool2d: unwrap batched
// self, apply batch_rule, re-wrap the result at the current level.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor quantized_max_pool2d_generated_plumbing(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::quantized_max_pool2d::call(self, kernel_size, stride, padding, dilation, ceil_mode);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, kernel_size, stride, padding, dilation, ceil_mode);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// Generated vmap plumbing for at::_ops::quantized_max_pool3d: unwrap batched
// self, apply batch_rule, re-wrap the result at the current level.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor quantized_max_pool3d_generated_plumbing(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::quantized_max_pool3d::call(self, kernel_size, stride, padding, dilation, ceil_mode);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, kernel_size, stride, padding, dilation, ceil_mode);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// Generated vmap plumbing for at::_ops::max_pool3d: unwrap batched self, apply
// batch_rule, re-wrap the result at the current level.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor max_pool3d_generated_plumbing(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::max_pool3d::call(self, kernel_size, stride, padding, dilation, ceil_mode);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, kernel_size, stride, padding, dilation, ceil_mode);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// Generated vmap plumbing for at::_ops::mean: unwrap batched self, apply
// batch_rule, re-wrap the result at the current level.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor mean_generated_plumbing(const at::Tensor & self, ::std::optional<at::ScalarType> dtype) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::mean::call(self, dtype);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, dtype);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// Generated vmap plumbing for at::_ops::mean_dim: unwrap batched self, apply
// batch_rule, re-wrap the result at the current level.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor mean_dim_generated_plumbing(const at::Tensor & self, at::OptionalIntArrayRef dim, bool keepdim, ::std::optional<at::ScalarType> dtype) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::mean_dim::call(self, dim, keepdim, dtype);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, dim, keepdim, dtype);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// Generated vmap plumbing for at::_ops::mean_names_dim (named-dim overload):
// unwrap batched self, apply batch_rule, re-wrap the result.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor mean_names_dim_generated_plumbing(const at::Tensor & self, at::DimnameList dim, bool keepdim, ::std::optional<at::ScalarType> dtype) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::mean_names_dim::call(self, dim, keepdim, dtype);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, dim, keepdim, dtype);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// Generated vmap plumbing for at::_ops::nanmean: unwrap batched self, apply
// batch_rule, re-wrap the result at the current level.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor nanmean_generated_plumbing(const at::Tensor & self, at::OptionalIntArrayRef dim, bool keepdim, ::std::optional<at::ScalarType> dtype) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::nanmean::call(self, dim, keepdim, dtype);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, dim, keepdim, dtype);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// Generated vmap plumbing for at::_ops::median (full-reduction overload):
// unwrap batched self, apply batch_rule, re-wrap the result.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor median_generated_plumbing(const at::Tensor & self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::median::call(self);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// Generated vmap plumbing for at::_ops::median_dim. Returns (values, indices);
// batch_rule yields two (value, bdim) pairs, each re-wrapped with makeBatched.
template <typename batch_rule_t, batch_rule_t batch_rule>
::std::tuple<at::Tensor,at::Tensor> median_dim_generated_plumbing(const at::Tensor & self, int64_t dim, bool keepdim) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::median_dim::call(self, dim, keepdim);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, dim, keepdim);
  return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
}
// Generated vmap plumbing for at::_ops::median_names_dim (named-dim overload).
// batch_rule yields two (value, bdim) pairs, each re-wrapped with makeBatched.
template <typename batch_rule_t, batch_rule_t batch_rule>
::std::tuple<at::Tensor,at::Tensor> median_names_dim_generated_plumbing(const at::Tensor & self, at::Dimname dim, bool keepdim) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::median_names_dim::call(self, dim, keepdim);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, dim, keepdim);
  return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
}
// Generated vmap plumbing for at::_ops::nanmedian (full-reduction overload):
// unwrap batched self, apply batch_rule, re-wrap the result.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor nanmedian_generated_plumbing(const at::Tensor & self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::nanmedian::call(self);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// Generated vmap plumbing for at::_ops::nanmedian_dim. batch_rule yields two
// (value, bdim) pairs, each re-wrapped with makeBatched.
template <typename batch_rule_t, batch_rule_t batch_rule>
::std::tuple<at::Tensor,at::Tensor> nanmedian_dim_generated_plumbing(const at::Tensor & self, int64_t dim, bool keepdim) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::nanmedian_dim::call(self, dim, keepdim);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, dim, keepdim);
  return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
}
// Generated vmap plumbing for at::_ops::nanmedian_names_dim (named-dim
// overload). batch_rule yields two (value, bdim) pairs, each re-wrapped.
template <typename batch_rule_t, batch_rule_t batch_rule>
::std::tuple<at::Tensor,at::Tensor> nanmedian_names_dim_generated_plumbing(const at::Tensor & self, at::Dimname dim, bool keepdim) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::nanmedian_names_dim::call(self, dim, keepdim);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, dim, keepdim);
  return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
}
// Generated vmap plumbing for at::_ops::min_dim. batch_rule yields two
// (value, bdim) pairs, each re-wrapped with makeBatched.
template <typename batch_rule_t, batch_rule_t batch_rule>
::std::tuple<at::Tensor,at::Tensor> min_dim_generated_plumbing(const at::Tensor & self, int64_t dim, bool keepdim) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::min_dim::call(self, dim, keepdim);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, dim, keepdim);
  return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
}
// Generated vmap plumbing for at::_ops::min_names_dim (named-dim overload).
// batch_rule yields two (value, bdim) pairs, each re-wrapped with makeBatched.
template <typename batch_rule_t, batch_rule_t batch_rule>
::std::tuple<at::Tensor,at::Tensor> min_names_dim_generated_plumbing(const at::Tensor & self, at::Dimname dim, bool keepdim) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::min_names_dim::call(self, dim, keepdim);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, dim, keepdim);
  return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
}
// Generated vmap plumbing for at::_ops::amin: unwrap batched self, apply
// batch_rule, re-wrap the result at the current level.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor amin_generated_plumbing(const at::Tensor & self, at::IntArrayRef dim, bool keepdim) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::amin::call(self, dim, keepdim);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, dim, keepdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// Generated vmap plumbing for at::_ops::_mps_convolution. self and weight are
// unconditionally unwrapped; bias is optional and only unwrapped when present
// (its value/bdim stay nullopt otherwise, which batch_rule must accept).
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor _mps_convolution_generated_plumbing(const at::Tensor & self, const at::Tensor & weight, const ::std::optional<at::Tensor> & bias, c10::SymIntArrayRef padding, c10::SymIntArrayRef stride, c10::SymIntArrayRef dilation, c10::SymInt groups) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(bias, cur_level)) {
    return at::_ops::_mps_convolution::call(self, weight, bias, padding, stride, dilation, groups);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto [weight_value, weight_bdim] = unwrapTensorAtLevel(weight, cur_level);
  std::optional<Tensor> bias_value;
  std::optional<int64_t> bias_bdim;
  if (bias) {
      std::tie(bias_value, bias_bdim) = unwrapTensorAtLevel(bias.value(), cur_level);
  }
  auto results = batch_rule(self_value, self_bdim, weight_value, weight_bdim, bias_value, bias_bdim, padding, stride, dilation, groups);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// Generated vmap plumbing for at::_ops::mps_convolution_backward. batch_rule
// yields three (value, bdim) pairs (grad_input/grad_weight/grad_bias order per
// the underlying op); each is re-wrapped with makeBatched.
template <typename batch_rule_t, batch_rule_t batch_rule>
::std::tuple<at::Tensor,at::Tensor,at::Tensor> mps_convolution_backward_generated_plumbing(const at::Tensor & self, const at::Tensor & grad_output, const at::Tensor & weight, c10::SymIntArrayRef padding, c10::SymIntArrayRef stride, c10::SymIntArrayRef dilation, c10::SymInt groups, ::std::array<bool,3> output_mask) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(weight, cur_level)) {
    return at::_ops::mps_convolution_backward::call(self, grad_output, weight, padding, stride, dilation, groups, output_mask);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto [grad_output_value, grad_output_bdim] = unwrapTensorAtLevel(grad_output, cur_level);
  auto [weight_value, weight_bdim] = unwrapTensorAtLevel(weight, cur_level);
  auto results = batch_rule(self_value, self_bdim, grad_output_value, grad_output_bdim, weight_value, weight_bdim, padding, stride, dilation, groups, output_mask);
  return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level));
}
// Generated vmap plumbing for at::_ops::mkldnn_convolution. Optional bias is
// unwrapped only when present; otherwise nullopt value/bdim are forwarded.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor mkldnn_convolution_generated_plumbing(const at::Tensor & self, const at::Tensor & weight, const ::std::optional<at::Tensor> & bias, c10::SymIntArrayRef padding, c10::SymIntArrayRef stride, c10::SymIntArrayRef dilation, c10::SymInt groups) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(bias, cur_level)) {
    return at::_ops::mkldnn_convolution::call(self, weight, bias, padding, stride, dilation, groups);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto [weight_value, weight_bdim] = unwrapTensorAtLevel(weight, cur_level);
  std::optional<Tensor> bias_value;
  std::optional<int64_t> bias_bdim;
  if (bias) {
      std::tie(bias_value, bias_bdim) = unwrapTensorAtLevel(bias.value(), cur_level);
  }
  auto results = batch_rule(self_value, self_bdim, weight_value, weight_bdim, bias_value, bias_bdim, padding, stride, dilation, groups);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// Generated vmap plumbing for at::_ops::mkldnn_rnn_layer. Seven tensor inputs
// are unwrapped into (value, bdim) pairs; batch_rule returns four pairs, each
// re-wrapped with makeBatched at the current level.
template <typename batch_rule_t, batch_rule_t batch_rule>
::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor> mkldnn_rnn_layer_generated_plumbing(const at::Tensor & input, const at::Tensor & weight0, const at::Tensor & weight1, const at::Tensor & weight2, const at::Tensor & weight3, const at::Tensor & hx_, const at::Tensor & cx_, bool reverse, at::IntArrayRef batch_sizes, int64_t mode, int64_t hidden_size, int64_t num_layers, bool has_biases, bool bidirectional, bool batch_first, bool train) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(weight0, cur_level) && !isBatchedAtLevel(weight1, cur_level) && !isBatchedAtLevel(weight2, cur_level) && !isBatchedAtLevel(weight3, cur_level) && !isBatchedAtLevel(hx_, cur_level) && !isBatchedAtLevel(cx_, cur_level)) {
    return at::_ops::mkldnn_rnn_layer::call(input, weight0, weight1, weight2, weight3, hx_, cx_, reverse, batch_sizes, mode, hidden_size, num_layers, has_biases, bidirectional, batch_first, train);
  }
  auto [input_value, input_bdim] = unwrapTensorAtLevel(input, cur_level);
  auto [weight0_value, weight0_bdim] = unwrapTensorAtLevel(weight0, cur_level);
  auto [weight1_value, weight1_bdim] = unwrapTensorAtLevel(weight1, cur_level);
  auto [weight2_value, weight2_bdim] = unwrapTensorAtLevel(weight2, cur_level);
  auto [weight3_value, weight3_bdim] = unwrapTensorAtLevel(weight3, cur_level);
  auto [hx__value, hx__bdim] = unwrapTensorAtLevel(hx_, cur_level);
  auto [cx__value, cx__bdim] = unwrapTensorAtLevel(cx_, cur_level);
  auto results = batch_rule(input_value, input_bdim, weight0_value, weight0_bdim, weight1_value, weight1_bdim, weight2_value, weight2_bdim, weight3_value, weight3_bdim, hx__value, hx__bdim, cx__value, cx__bdim, reverse, batch_sizes, mode, hidden_size, num_layers, has_biases, bidirectional, batch_first, train);
  return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level), makeBatched(std::get<6>(results), std::get<7>(results), cur_level));
}
// Generated vmap plumbing for at::_ops::mkldnn_rnn_layer_backward. Required
// tensors are unwrapped unconditionally; the three optional gradients
// (grad_output/grad_hy/grad_cy) are unwrapped only when present. batch_rule
// returns seven (value, bdim) pairs, each re-wrapped with makeBatched.
template <typename batch_rule_t, batch_rule_t batch_rule>
::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor,at::Tensor,at::Tensor,at::Tensor> mkldnn_rnn_layer_backward_generated_plumbing(const at::Tensor & input, const at::Tensor & weight1, const at::Tensor & weight2, const at::Tensor & weight3, const at::Tensor & weight4, const at::Tensor & hx_, const at::Tensor & cx_tmp, const at::Tensor & output, const at::Tensor & hy_, const at::Tensor & cy_, const ::std::optional<at::Tensor> & grad_output, const ::std::optional<at::Tensor> & grad_hy, const ::std::optional<at::Tensor> & grad_cy, bool reverse, int64_t mode, int64_t hidden_size, int64_t num_layers, bool has_biases, bool train, bool bidirectional, at::IntArrayRef batch_sizes, bool batch_first, const at::Tensor & workspace) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(weight1, cur_level) && !isBatchedAtLevel(weight2, cur_level) && !isBatchedAtLevel(weight3, cur_level) && !isBatchedAtLevel(weight4, cur_level) && !isBatchedAtLevel(hx_, cur_level) && !isBatchedAtLevel(cx_tmp, cur_level) && !isBatchedAtLevel(output, cur_level) && !isBatchedAtLevel(hy_, cur_level) && !isBatchedAtLevel(cy_, cur_level) && !isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(grad_hy, cur_level) && !isBatchedAtLevel(grad_cy, cur_level) && !isBatchedAtLevel(workspace, cur_level)) {
    return at::_ops::mkldnn_rnn_layer_backward::call(input, weight1, weight2, weight3, weight4, hx_, cx_tmp, output, hy_, cy_, grad_output, grad_hy, grad_cy, reverse, mode, hidden_size, num_layers, has_biases, train, bidirectional, batch_sizes, batch_first, workspace);
  }
  auto [input_value, input_bdim] = unwrapTensorAtLevel(input, cur_level);
  auto [weight1_value, weight1_bdim] = unwrapTensorAtLevel(weight1, cur_level);
  auto [weight2_value, weight2_bdim] = unwrapTensorAtLevel(weight2, cur_level);
  auto [weight3_value, weight3_bdim] = unwrapTensorAtLevel(weight3, cur_level);
  auto [weight4_value, weight4_bdim] = unwrapTensorAtLevel(weight4, cur_level);
  auto [hx__value, hx__bdim] = unwrapTensorAtLevel(hx_, cur_level);
  auto [cx_tmp_value, cx_tmp_bdim] = unwrapTensorAtLevel(cx_tmp, cur_level);
  auto [output_value, output_bdim] = unwrapTensorAtLevel(output, cur_level);
  auto [hy__value, hy__bdim] = unwrapTensorAtLevel(hy_, cur_level);
  auto [cy__value, cy__bdim] = unwrapTensorAtLevel(cy_, cur_level);
  auto [workspace_value, workspace_bdim] = unwrapTensorAtLevel(workspace, cur_level);
  std::optional<Tensor> grad_output_value;
  std::optional<int64_t> grad_output_bdim;
  if (grad_output) {
      std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output.value(), cur_level);
  }
  std::optional<Tensor> grad_hy_value;
  std::optional<int64_t> grad_hy_bdim;
  if (grad_hy) {
      std::tie(grad_hy_value, grad_hy_bdim) = unwrapTensorAtLevel(grad_hy.value(), cur_level);
  }
  std::optional<Tensor> grad_cy_value;
  std::optional<int64_t> grad_cy_bdim;
  if (grad_cy) {
      std::tie(grad_cy_value, grad_cy_bdim) = unwrapTensorAtLevel(grad_cy.value(), cur_level);
  }
  auto results = batch_rule(input_value, input_bdim, weight1_value, weight1_bdim, weight2_value, weight2_bdim, weight3_value, weight3_bdim, weight4_value, weight4_bdim, hx__value, hx__bdim, cx_tmp_value, cx_tmp_bdim, output_value, output_bdim, hy__value, hy__bdim, cy__value, cy__bdim, grad_output_value, grad_output_bdim, grad_hy_value, grad_hy_bdim, grad_cy_value, grad_cy_bdim, reverse, mode, hidden_size, num_layers, has_biases, train, bidirectional, batch_sizes, batch_first, workspace_value, workspace_bdim);
  return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level), makeBatched(std::get<6>(results), std::get<7>(results), cur_level), makeBatched(std::get<8>(results), std::get<9>(results), cur_level), makeBatched(std::get<10>(results), std::get<11>(results), cur_level), makeBatched(std::get<12>(results), std::get<13>(results), cur_level));
}
// Generated vmap plumbing for at::_ops::miopen_batch_norm. input/weight are
// unwrapped unconditionally; the optional bias/running_mean/running_var are
// unwrapped only when present. batch_rule returns three (value, bdim) pairs.
template <typename batch_rule_t, batch_rule_t batch_rule>
::std::tuple<at::Tensor,at::Tensor,at::Tensor> miopen_batch_norm_generated_plumbing(const at::Tensor & input, const at::Tensor & weight, const ::std::optional<at::Tensor> & bias, const ::std::optional<at::Tensor> & running_mean, const ::std::optional<at::Tensor> & running_var, bool training, double exponential_average_factor, double epsilon) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(bias, cur_level) && !isBatchedAtLevel(running_mean, cur_level) && !isBatchedAtLevel(running_var, cur_level)) {
    return at::_ops::miopen_batch_norm::call(input, weight, bias, running_mean, running_var, training, exponential_average_factor, epsilon);
  }
  auto [input_value, input_bdim] = unwrapTensorAtLevel(input, cur_level);
  auto [weight_value, weight_bdim] = unwrapTensorAtLevel(weight, cur_level);
  std::optional<Tensor> bias_value;
  std::optional<int64_t> bias_bdim;
  if (bias) {
      std::tie(bias_value, bias_bdim) = unwrapTensorAtLevel(bias.value(), cur_level);
  }
  std::optional<Tensor> running_mean_value;
  std::optional<int64_t> running_mean_bdim;
  if (running_mean) {
      std::tie(running_mean_value, running_mean_bdim) = unwrapTensorAtLevel(running_mean.value(), cur_level);
  }
  std::optional<Tensor> running_var_value;
  std::optional<int64_t> running_var_bdim;
  if (running_var) {
      std::tie(running_var_value, running_var_bdim) = unwrapTensorAtLevel(running_var.value(), cur_level);
  }
  auto results = batch_rule(input_value, input_bdim, weight_value, weight_bdim, bias_value, bias_bdim, running_mean_value, running_mean_bdim, running_var_value, running_var_bdim, training, exponential_average_factor, epsilon);
  return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level));
}
// Generated vmap plumbing for at::_ops::miopen_batch_norm_backward. The four
// optional stats tensors (running_mean/running_var/save_mean/save_var) are
// unwrapped only when present. batch_rule returns three (value, bdim) pairs.
template <typename batch_rule_t, batch_rule_t batch_rule>
::std::tuple<at::Tensor,at::Tensor,at::Tensor> miopen_batch_norm_backward_generated_plumbing(const at::Tensor & input, const at::Tensor & grad_output, const at::Tensor & weight, const ::std::optional<at::Tensor> & running_mean, const ::std::optional<at::Tensor> & running_var, const ::std::optional<at::Tensor> & save_mean, const ::std::optional<at::Tensor> & save_var, double epsilon) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(running_mean, cur_level) && !isBatchedAtLevel(running_var, cur_level) && !isBatchedAtLevel(save_mean, cur_level) && !isBatchedAtLevel(save_var, cur_level)) {
    return at::_ops::miopen_batch_norm_backward::call(input, grad_output, weight, running_mean, running_var, save_mean, save_var, epsilon);
  }
  auto [input_value, input_bdim] = unwrapTensorAtLevel(input, cur_level);
  auto [grad_output_value, grad_output_bdim] = unwrapTensorAtLevel(grad_output, cur_level);
  auto [weight_value, weight_bdim] = unwrapTensorAtLevel(weight, cur_level);
  std::optional<Tensor> running_mean_value;
  std::optional<int64_t> running_mean_bdim;
  if (running_mean) {
      std::tie(running_mean_value, running_mean_bdim) = unwrapTensorAtLevel(running_mean.value(), cur_level);
  }
  std::optional<Tensor> running_var_value;
  std::optional<int64_t> running_var_bdim;
  if (running_var) {
      std::tie(running_var_value, running_var_bdim) = unwrapTensorAtLevel(running_var.value(), cur_level);
  }
  std::optional<Tensor> save_mean_value;
  std::optional<int64_t> save_mean_bdim;
  if (save_mean) {
      std::tie(save_mean_value, save_mean_bdim) = unwrapTensorAtLevel(save_mean.value(), cur_level);
  }
  std::optional<Tensor> save_var_value;
  std::optional<int64_t> save_var_bdim;
  if (save_var) {
      std::tie(save_var_value, save_var_bdim) = unwrapTensorAtLevel(save_var.value(), cur_level);
  }
  auto results = batch_rule(input_value, input_bdim, grad_output_value, grad_output_bdim, weight_value, weight_bdim, running_mean_value, running_mean_bdim, running_var_value, running_var_bdim, save_mean_value, save_mean_bdim, save_var_value, save_var_bdim, epsilon);
  return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level));
}
// Generated vmap plumbing for at::_ops::miopen_convolution. Optional bias is
// unwrapped only when present; otherwise nullopt value/bdim are forwarded.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor miopen_convolution_generated_plumbing(const at::Tensor & self, const at::Tensor & weight, const ::std::optional<at::Tensor> & bias, c10::SymIntArrayRef padding, c10::SymIntArrayRef stride, c10::SymIntArrayRef dilation, c10::SymInt groups, bool benchmark, bool deterministic) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(bias, cur_level)) {
    return at::_ops::miopen_convolution::call(self, weight, bias, padding, stride, dilation, groups, benchmark, deterministic);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto [weight_value, weight_bdim] = unwrapTensorAtLevel(weight, cur_level);
  std::optional<Tensor> bias_value;
  std::optional<int64_t> bias_bdim;
  if (bias) {
      std::tie(bias_value, bias_bdim) = unwrapTensorAtLevel(bias.value(), cur_level);
  }
  auto results = batch_rule(self_value, self_bdim, weight_value, weight_bdim, bias_value, bias_bdim, padding, stride, dilation, groups, benchmark, deterministic);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
7568 template <typename batch_rule_t, batch_rule_t batch_rule>
7569 at::Tensor miopen_convolution_transpose_generated_plumbing(const at::Tensor & self, const at::Tensor & weight, const ::std::optional<at::Tensor> & bias, c10::SymIntArrayRef padding, c10::SymIntArrayRef output_padding, c10::SymIntArrayRef stride, c10::SymIntArrayRef dilation, c10::SymInt groups, bool benchmark, bool deterministic) {
7570   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
7571   auto maybe_layer = maybeCurrentDynamicLayer();
7572   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
7573   int64_t cur_level = maybe_layer->layerId();
7574   if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(bias, cur_level)) {
7575     return at::_ops::miopen_convolution_transpose::call(self, weight, bias, padding, output_padding, stride, dilation, groups, benchmark, deterministic);
7576   }
7577   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
7578   auto [weight_value, weight_bdim] = unwrapTensorAtLevel(weight, cur_level);
7579   std::optional<Tensor> bias_value;
7580   std::optional<int64_t> bias_bdim;
7581   if (bias) {
7582       std::tie(bias_value, bias_bdim) = unwrapTensorAtLevel(bias.value(), cur_level);
7583   }
7584   auto results = batch_rule(self_value, self_bdim, weight_value, weight_bdim, bias_value, bias_bdim, padding, output_padding, stride, dilation, groups, benchmark, deterministic);
7585   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
7586 }
7587 template <typename batch_rule_t, batch_rule_t batch_rule>
7588 at::Tensor miopen_depthwise_convolution_generated_plumbing(const at::Tensor & self, const at::Tensor & weight, const ::std::optional<at::Tensor> & bias, c10::SymIntArrayRef padding, c10::SymIntArrayRef stride, c10::SymIntArrayRef dilation, c10::SymInt groups, bool benchmark, bool deterministic) {
7589   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
7590   auto maybe_layer = maybeCurrentDynamicLayer();
7591   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
7592   int64_t cur_level = maybe_layer->layerId();
7593   if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(bias, cur_level)) {
7594     return at::_ops::miopen_depthwise_convolution::call(self, weight, bias, padding, stride, dilation, groups, benchmark, deterministic);
7595   }
7596   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
7597   auto [weight_value, weight_bdim] = unwrapTensorAtLevel(weight, cur_level);
7598   std::optional<Tensor> bias_value;
7599   std::optional<int64_t> bias_bdim;
7600   if (bias) {
7601       std::tie(bias_value, bias_bdim) = unwrapTensorAtLevel(bias.value(), cur_level);
7602   }
7603   auto results = batch_rule(self_value, self_bdim, weight_value, weight_bdim, bias_value, bias_bdim, padding, stride, dilation, groups, benchmark, deterministic);
7604   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
7605 }
7606 template <typename batch_rule_t, batch_rule_t batch_rule>
7607 at::Tensor miopen_convolution_relu_generated_plumbing(const at::Tensor & self, const at::Tensor & weight, const ::std::optional<at::Tensor> & bias, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, c10::SymIntArrayRef dilation, c10::SymInt groups) {
7608   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
7609   auto maybe_layer = maybeCurrentDynamicLayer();
7610   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
7611   int64_t cur_level = maybe_layer->layerId();
7612   if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(bias, cur_level)) {
7613     return at::_ops::miopen_convolution_relu::call(self, weight, bias, stride, padding, dilation, groups);
7614   }
7615   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
7616   auto [weight_value, weight_bdim] = unwrapTensorAtLevel(weight, cur_level);
7617   std::optional<Tensor> bias_value;
7618   std::optional<int64_t> bias_bdim;
7619   if (bias) {
7620       std::tie(bias_value, bias_bdim) = unwrapTensorAtLevel(bias.value(), cur_level);
7621   }
7622   auto results = batch_rule(self_value, self_bdim, weight_value, weight_bdim, bias_value, bias_bdim, stride, padding, dilation, groups);
7623   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
7624 }
7625 template <typename batch_rule_t, batch_rule_t batch_rule>
7626 at::Tensor miopen_convolution_add_relu_generated_plumbing(const at::Tensor & self, const at::Tensor & weight, const at::Tensor & z, const ::std::optional<at::Scalar> & alpha, const ::std::optional<at::Tensor> & bias, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, c10::SymIntArrayRef dilation, c10::SymInt groups) {
7627   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
7628   auto maybe_layer = maybeCurrentDynamicLayer();
7629   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
7630   int64_t cur_level = maybe_layer->layerId();
7631   if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(z, cur_level) && !isBatchedAtLevel(bias, cur_level)) {
7632     return at::_ops::miopen_convolution_add_relu::call(self, weight, z, alpha, bias, stride, padding, dilation, groups);
7633   }
7634   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
7635   auto [weight_value, weight_bdim] = unwrapTensorAtLevel(weight, cur_level);
7636   auto [z_value, z_bdim] = unwrapTensorAtLevel(z, cur_level);
7637   std::optional<Tensor> bias_value;
7638   std::optional<int64_t> bias_bdim;
7639   if (bias) {
7640       std::tie(bias_value, bias_bdim) = unwrapTensorAtLevel(bias.value(), cur_level);
7641   }
7642   auto results = batch_rule(self_value, self_bdim, weight_value, weight_bdim, z_value, z_bdim, alpha, bias_value, bias_bdim, stride, padding, dilation, groups);
7643   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
7644 }
// vmap plumbing for aten::miopen_rnn. Unwraps the tensor inputs batched at the
// current dynamic layer (input, hx, and the optional cx/dropout_state) and
// hands (value, bdim) pairs to `batch_rule`; the argument order of that call
// and the tuple indices in the return are generator-defined and must stay in
// sync with the batch rule's signature. `weight` (a TensorList) is forwarded
// as-is. Returns the five outputs re-wrapped as batched tensors at this level.
template <typename batch_rule_t, batch_rule_t batch_rule>
::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor,at::Tensor> miopen_rnn_generated_plumbing(const at::Tensor & input, at::TensorList weight, int64_t weight_stride0, const at::Tensor & hx, const ::std::optional<at::Tensor> & cx, int64_t mode, int64_t hidden_size, int64_t num_layers, bool batch_first, double dropout, bool train, bool bidirectional, at::IntArrayRef batch_sizes, const ::std::optional<at::Tensor> & dropout_state) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  // Fast path: no participant is batched at this level, so dispatch straight
  // through to the regular operator.
  if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(hx, cur_level) && !isBatchedAtLevel(cx, cur_level) && !isBatchedAtLevel(dropout_state, cur_level)) {
    return at::_ops::miopen_rnn::call(input, weight, weight_stride0, hx, cx, mode, hidden_size, num_layers, batch_first, dropout, train, bidirectional, batch_sizes, dropout_state);
  }
  auto [input_value, input_bdim] = unwrapTensorAtLevel(input, cur_level);
  auto [hx_value, hx_bdim] = unwrapTensorAtLevel(hx, cur_level);
  // Optional tensors: unwrap only when present; absent ones stay nullopt.
  std::optional<Tensor> cx_value;
  std::optional<int64_t> cx_bdim;
  if (cx) {
      std::tie(cx_value, cx_bdim) = unwrapTensorAtLevel(cx.value(), cur_level);
  }
  std::optional<Tensor> dropout_state_value;
  std::optional<int64_t> dropout_state_bdim;
  if (dropout_state) {
      std::tie(dropout_state_value, dropout_state_bdim) = unwrapTensorAtLevel(dropout_state.value(), cur_level);
  }
  auto results = batch_rule(input_value, input_bdim, weight, weight_stride0, hx_value, hx_bdim, cx_value, cx_bdim, mode, hidden_size, num_layers, batch_first, dropout, train, bidirectional, batch_sizes, dropout_state_value, dropout_state_bdim);
  // `results` interleaves (value, bdim) pairs; re-wrap each of the 5 outputs.
  return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level), makeBatched(std::get<6>(results), std::get<7>(results), cur_level), makeBatched(std::get<8>(results), std::get<9>(results), cur_level));
}
// vmap plumbing for aten::miopen_rnn_backward. Unwraps every tensor input
// batched at the current level — five required tensors plus five optionals —
// and forwards (value, bdim) pairs to `batch_rule` in the generator-defined
// order; `weight` (TensorList) and the scalar/flag arguments pass through
// unchanged. The last result is a vector of tensors, hence makeBatchedVector.
template <typename batch_rule_t, batch_rule_t batch_rule>
::std::tuple<at::Tensor,at::Tensor,at::Tensor,::std::vector<at::Tensor>> miopen_rnn_backward_generated_plumbing(const at::Tensor & input, at::TensorList weight, int64_t weight_stride0, const at::Tensor & weight_buf, const at::Tensor & hx, const ::std::optional<at::Tensor> & cx, const at::Tensor & output, const ::std::optional<at::Tensor> & grad_output, const ::std::optional<at::Tensor> & grad_hy, const ::std::optional<at::Tensor> & grad_cy, int64_t mode, int64_t hidden_size, int64_t num_layers, bool batch_first, double dropout, bool train, bool bidirectional, at::IntArrayRef batch_sizes, const ::std::optional<at::Tensor> & dropout_state, const at::Tensor & reserve, ::std::array<bool,4> output_mask) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  // Fast path: nothing batched at this level — call the plain op.
  if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(weight_buf, cur_level) && !isBatchedAtLevel(hx, cur_level) && !isBatchedAtLevel(cx, cur_level) && !isBatchedAtLevel(output, cur_level) && !isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(grad_hy, cur_level) && !isBatchedAtLevel(grad_cy, cur_level) && !isBatchedAtLevel(dropout_state, cur_level) && !isBatchedAtLevel(reserve, cur_level)) {
    return at::_ops::miopen_rnn_backward::call(input, weight, weight_stride0, weight_buf, hx, cx, output, grad_output, grad_hy, grad_cy, mode, hidden_size, num_layers, batch_first, dropout, train, bidirectional, batch_sizes, dropout_state, reserve, output_mask);
  }
  // Required tensors.
  auto [input_value, input_bdim] = unwrapTensorAtLevel(input, cur_level);
  auto [weight_buf_value, weight_buf_bdim] = unwrapTensorAtLevel(weight_buf, cur_level);
  auto [hx_value, hx_bdim] = unwrapTensorAtLevel(hx, cur_level);
  auto [output_value, output_bdim] = unwrapTensorAtLevel(output, cur_level);
  auto [reserve_value, reserve_bdim] = unwrapTensorAtLevel(reserve, cur_level);
  // Optional tensors: unwrap only when present; absent ones stay nullopt.
  std::optional<Tensor> cx_value;
  std::optional<int64_t> cx_bdim;
  if (cx) {
      std::tie(cx_value, cx_bdim) = unwrapTensorAtLevel(cx.value(), cur_level);
  }
  std::optional<Tensor> grad_output_value;
  std::optional<int64_t> grad_output_bdim;
  if (grad_output) {
      std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output.value(), cur_level);
  }
  std::optional<Tensor> grad_hy_value;
  std::optional<int64_t> grad_hy_bdim;
  if (grad_hy) {
      std::tie(grad_hy_value, grad_hy_bdim) = unwrapTensorAtLevel(grad_hy.value(), cur_level);
  }
  std::optional<Tensor> grad_cy_value;
  std::optional<int64_t> grad_cy_bdim;
  if (grad_cy) {
      std::tie(grad_cy_value, grad_cy_bdim) = unwrapTensorAtLevel(grad_cy.value(), cur_level);
  }
  std::optional<Tensor> dropout_state_value;
  std::optional<int64_t> dropout_state_bdim;
  if (dropout_state) {
      std::tie(dropout_state_value, dropout_state_bdim) = unwrapTensorAtLevel(dropout_state.value(), cur_level);
  }
  auto results = batch_rule(input_value, input_bdim, weight, weight_stride0, weight_buf_value, weight_buf_bdim, hx_value, hx_bdim, cx_value, cx_bdim, output_value, output_bdim, grad_output_value, grad_output_bdim, grad_hy_value, grad_hy_bdim, grad_cy_value, grad_cy_bdim, mode, hidden_size, num_layers, batch_first, dropout, train, bidirectional, batch_sizes, dropout_state_value, dropout_state_bdim, reserve_value, reserve_bdim, output_mask);
  // First three outputs are single tensors; the fourth is a tensor vector.
  return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level), makeBatchedVector(std::get<6>(results), std::get<7>(results), cur_level));
}
7711 template <typename batch_rule_t, batch_rule_t batch_rule>
7712 at::Tensor mm_generated_plumbing(const at::Tensor & self, const at::Tensor & mat2) {
7713   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
7714   auto maybe_layer = maybeCurrentDynamicLayer();
7715   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
7716   int64_t cur_level = maybe_layer->layerId();
7717   if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(mat2, cur_level)) {
7718     return at::_ops::mm::call(self, mat2);
7719   }
7720   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
7721   auto [mat2_value, mat2_bdim] = unwrapTensorAtLevel(mat2, cur_level);
7722   auto results = batch_rule(self_value, self_bdim, mat2_value, mat2_bdim);
7723   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
7724 }
7725 template <typename batch_rule_t, batch_rule_t batch_rule>
7726 at::Tensor mm_dtype_generated_plumbing(const at::Tensor & self, const at::Tensor & mat2, at::ScalarType out_dtype) {
7727   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
7728   auto maybe_layer = maybeCurrentDynamicLayer();
7729   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
7730   int64_t cur_level = maybe_layer->layerId();
7731   if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(mat2, cur_level)) {
7732     return at::_ops::mm_dtype::call(self, mat2, out_dtype);
7733   }
7734   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
7735   auto [mat2_value, mat2_bdim] = unwrapTensorAtLevel(mat2, cur_level);
7736   auto results = batch_rule(self_value, self_bdim, mat2_value, mat2_bdim, out_dtype);
7737   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
7738 }
7739 template <typename batch_rule_t, batch_rule_t batch_rule>
7740 at::Tensor _int_mm_generated_plumbing(const at::Tensor & self, const at::Tensor & mat2) {
7741   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
7742   auto maybe_layer = maybeCurrentDynamicLayer();
7743   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
7744   int64_t cur_level = maybe_layer->layerId();
7745   if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(mat2, cur_level)) {
7746     return at::_ops::_int_mm::call(self, mat2);
7747   }
7748   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
7749   auto [mat2_value, mat2_bdim] = unwrapTensorAtLevel(mat2, cur_level);
7750   auto results = batch_rule(self_value, self_bdim, mat2_value, mat2_bdim);
7751   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
7752 }
7753 template <typename batch_rule_t, batch_rule_t batch_rule>
7754 at::Tensor _convert_weight_to_int4pack_generated_plumbing(const at::Tensor & self, int64_t innerKTiles) {
7755   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
7756   auto maybe_layer = maybeCurrentDynamicLayer();
7757   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
7758   int64_t cur_level = maybe_layer->layerId();
7759   if (!isBatchedAtLevel(self, cur_level)) {
7760     return at::_ops::_convert_weight_to_int4pack::call(self, innerKTiles);
7761   }
7762   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
7763   auto results = batch_rule(self_value, self_bdim, innerKTiles);
7764   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
7765 }
7766 template <typename batch_rule_t, batch_rule_t batch_rule>
7767 at::Tensor _weight_int4pack_mm_generated_plumbing(const at::Tensor & self, const at::Tensor & mat2, int64_t qGroupSize, const at::Tensor & qScaleAndZeros) {
7768   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
7769   auto maybe_layer = maybeCurrentDynamicLayer();
7770   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
7771   int64_t cur_level = maybe_layer->layerId();
7772   if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(mat2, cur_level) && !isBatchedAtLevel(qScaleAndZeros, cur_level)) {
7773     return at::_ops::_weight_int4pack_mm::call(self, mat2, qGroupSize, qScaleAndZeros);
7774   }
7775   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
7776   auto [mat2_value, mat2_bdim] = unwrapTensorAtLevel(mat2, cur_level);
7777   auto [qScaleAndZeros_value, qScaleAndZeros_bdim] = unwrapTensorAtLevel(qScaleAndZeros, cur_level);
7778   auto results = batch_rule(self_value, self_bdim, mat2_value, mat2_bdim, qGroupSize, qScaleAndZeros_value, qScaleAndZeros_bdim);
7779   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
7780 }
7781 template <typename batch_rule_t, batch_rule_t batch_rule>
7782 at::Tensor _weight_int4pack_mm_with_scales_and_zeros_generated_plumbing(const at::Tensor & self, const at::Tensor & mat2, int64_t qGroupSize, const at::Tensor & qScale, const at::Tensor & qZeros) {
7783   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
7784   auto maybe_layer = maybeCurrentDynamicLayer();
7785   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
7786   int64_t cur_level = maybe_layer->layerId();
7787   if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(mat2, cur_level) && !isBatchedAtLevel(qScale, cur_level) && !isBatchedAtLevel(qZeros, cur_level)) {
7788     return at::_ops::_weight_int4pack_mm_with_scales_and_zeros::call(self, mat2, qGroupSize, qScale, qZeros);
7789   }
7790   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
7791   auto [mat2_value, mat2_bdim] = unwrapTensorAtLevel(mat2, cur_level);
7792   auto [qScale_value, qScale_bdim] = unwrapTensorAtLevel(qScale, cur_level);
7793   auto [qZeros_value, qZeros_bdim] = unwrapTensorAtLevel(qZeros, cur_level);
7794   auto results = batch_rule(self_value, self_bdim, mat2_value, mat2_bdim, qGroupSize, qScale_value, qScale_bdim, qZeros_value, qZeros_bdim);
7795   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
7796 }
7797 template <typename batch_rule_t, batch_rule_t batch_rule>
7798 at::Tensor _convert_weight_to_int4pack_for_cpu_generated_plumbing(const at::Tensor & self, int64_t innerKTiles) {
7799   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
7800   auto maybe_layer = maybeCurrentDynamicLayer();
7801   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
7802   int64_t cur_level = maybe_layer->layerId();
7803   if (!isBatchedAtLevel(self, cur_level)) {
7804     return at::_ops::_convert_weight_to_int4pack_for_cpu::call(self, innerKTiles);
7805   }
7806   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
7807   auto results = batch_rule(self_value, self_bdim, innerKTiles);
7808   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
7809 }
7810 template <typename batch_rule_t, batch_rule_t batch_rule>
7811 at::Tensor _weight_int4pack_mm_for_cpu_generated_plumbing(const at::Tensor & self, const at::Tensor & mat2, int64_t qGroupSize, const at::Tensor & qScaleAndZeros) {
7812   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
7813   auto maybe_layer = maybeCurrentDynamicLayer();
7814   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
7815   int64_t cur_level = maybe_layer->layerId();
7816   if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(mat2, cur_level) && !isBatchedAtLevel(qScaleAndZeros, cur_level)) {
7817     return at::_ops::_weight_int4pack_mm_for_cpu::call(self, mat2, qGroupSize, qScaleAndZeros);
7818   }
7819   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
7820   auto [mat2_value, mat2_bdim] = unwrapTensorAtLevel(mat2, cur_level);
7821   auto [qScaleAndZeros_value, qScaleAndZeros_bdim] = unwrapTensorAtLevel(qScaleAndZeros, cur_level);
7822   auto results = batch_rule(self_value, self_bdim, mat2_value, mat2_bdim, qGroupSize, qScaleAndZeros_value, qScaleAndZeros_bdim);
7823   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
7824 }
7825 template <typename batch_rule_t, batch_rule_t batch_rule>
7826 at::Tensor _dyn_quant_pack_4bit_weight_generated_plumbing(const at::Tensor & weights, const at::Tensor & scales_zeros, const ::std::optional<at::Tensor> & bias, int64_t block_size, int64_t in_features, int64_t out_features) {
7827   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
7828   auto maybe_layer = maybeCurrentDynamicLayer();
7829   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
7830   int64_t cur_level = maybe_layer->layerId();
7831   if (!isBatchedAtLevel(weights, cur_level) && !isBatchedAtLevel(scales_zeros, cur_level) && !isBatchedAtLevel(bias, cur_level)) {
7832     return at::_ops::_dyn_quant_pack_4bit_weight::call(weights, scales_zeros, bias, block_size, in_features, out_features);
7833   }
7834   auto [weights_value, weights_bdim] = unwrapTensorAtLevel(weights, cur_level);
7835   auto [scales_zeros_value, scales_zeros_bdim] = unwrapTensorAtLevel(scales_zeros, cur_level);
7836   std::optional<Tensor> bias_value;
7837   std::optional<int64_t> bias_bdim;
7838   if (bias) {
7839       std::tie(bias_value, bias_bdim) = unwrapTensorAtLevel(bias.value(), cur_level);
7840   }
7841   auto results = batch_rule(weights_value, weights_bdim, scales_zeros_value, scales_zeros_bdim, bias_value, bias_bdim, block_size, in_features, out_features);
7842   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
7843 }
7844 template <typename batch_rule_t, batch_rule_t batch_rule>
7845 at::Tensor _dyn_quant_matmul_4bit_generated_plumbing(const at::Tensor & inp, const at::Tensor & packed_weights, int64_t block_size, int64_t in_features, int64_t out_features) {
7846   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
7847   auto maybe_layer = maybeCurrentDynamicLayer();
7848   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
7849   int64_t cur_level = maybe_layer->layerId();
7850   if (!isBatchedAtLevel(inp, cur_level) && !isBatchedAtLevel(packed_weights, cur_level)) {
7851     return at::_ops::_dyn_quant_matmul_4bit::call(inp, packed_weights, block_size, in_features, out_features);
7852   }
7853   auto [inp_value, inp_bdim] = unwrapTensorAtLevel(inp, cur_level);
7854   auto [packed_weights_value, packed_weights_bdim] = unwrapTensorAtLevel(packed_weights, cur_level);
7855   auto results = batch_rule(inp_value, inp_bdim, packed_weights_value, packed_weights_bdim, block_size, in_features, out_features);
7856   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
7857 }
7858 template <typename batch_rule_t, batch_rule_t batch_rule>
7859 at::Tensor _weight_int8pack_mm_generated_plumbing(const at::Tensor & self, const at::Tensor & mat2, const at::Tensor & scales) {
7860   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
7861   auto maybe_layer = maybeCurrentDynamicLayer();
7862   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
7863   int64_t cur_level = maybe_layer->layerId();
7864   if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(mat2, cur_level) && !isBatchedAtLevel(scales, cur_level)) {
7865     return at::_ops::_weight_int8pack_mm::call(self, mat2, scales);
7866   }
7867   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
7868   auto [mat2_value, mat2_bdim] = unwrapTensorAtLevel(mat2, cur_level);
7869   auto [scales_value, scales_bdim] = unwrapTensorAtLevel(scales, cur_level);
7870   auto results = batch_rule(self_value, self_bdim, mat2_value, mat2_bdim, scales_value, scales_bdim);
7871   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
7872 }
7873 template <typename batch_rule_t, batch_rule_t batch_rule>
7874 at::Tensor _sparse_mm_generated_plumbing(const at::Tensor & sparse, const at::Tensor & dense) {
7875   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
7876   auto maybe_layer = maybeCurrentDynamicLayer();
7877   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
7878   int64_t cur_level = maybe_layer->layerId();
7879   if (!isBatchedAtLevel(sparse, cur_level) && !isBatchedAtLevel(dense, cur_level)) {
7880     return at::_ops::_sparse_mm::call(sparse, dense);
7881   }
7882   auto [sparse_value, sparse_bdim] = unwrapTensorAtLevel(sparse, cur_level);
7883   auto [dense_value, dense_bdim] = unwrapTensorAtLevel(dense, cur_level);
7884   auto results = batch_rule(sparse_value, sparse_bdim, dense_value, dense_bdim);
7885   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
7886 }
7887 template <typename batch_rule_t, batch_rule_t batch_rule>
7888 at::Tensor _sparse_mm_reduce_generated_plumbing(const at::Tensor & sparse, const at::Tensor & dense, c10::string_view reduce) {
7889   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
7890   auto maybe_layer = maybeCurrentDynamicLayer();
7891   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
7892   int64_t cur_level = maybe_layer->layerId();
7893   if (!isBatchedAtLevel(sparse, cur_level) && !isBatchedAtLevel(dense, cur_level)) {
7894     return at::_ops::_sparse_mm_reduce::call(sparse, dense, reduce);
7895   }
7896   auto [sparse_value, sparse_bdim] = unwrapTensorAtLevel(sparse, cur_level);
7897   auto [dense_value, dense_bdim] = unwrapTensorAtLevel(dense, cur_level);
7898   auto results = batch_rule(sparse_value, sparse_bdim, dense_value, dense_bdim, reduce);
7899   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
7900 }
7901 template <typename batch_rule_t, batch_rule_t batch_rule>
7902 at::Tensor _sparse_sparse_matmul_generated_plumbing(const at::Tensor & self, const at::Tensor & other) {
7903   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
7904   auto maybe_layer = maybeCurrentDynamicLayer();
7905   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
7906   int64_t cur_level = maybe_layer->layerId();
7907   if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
7908     return at::_ops::_sparse_sparse_matmul::call(self, other);
7909   }
7910   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
7911   auto [other_value, other_bdim] = unwrapTensorAtLevel(other, cur_level);
7912   auto results = batch_rule(self_value, self_bdim, other_value, other_bdim);
7913   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
7914 }
7915 template <typename batch_rule_t, batch_rule_t batch_rule>
7916 ::std::tuple<at::Tensor,at::Tensor> mode_generated_plumbing(const at::Tensor & self, int64_t dim, bool keepdim) {
7917   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
7918   auto maybe_layer = maybeCurrentDynamicLayer();
7919   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
7920   int64_t cur_level = maybe_layer->layerId();
7921   if (!isBatchedAtLevel(self, cur_level)) {
7922     return at::_ops::mode::call(self, dim, keepdim);
7923   }
7924   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
7925   auto results = batch_rule(self_value, self_bdim, dim, keepdim);
7926   return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
7927 }
7928 template <typename batch_rule_t, batch_rule_t batch_rule>
7929 ::std::tuple<at::Tensor,at::Tensor> mode_dimname_generated_plumbing(const at::Tensor & self, at::Dimname dim, bool keepdim) {
7930   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
7931   auto maybe_layer = maybeCurrentDynamicLayer();
7932   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
7933   int64_t cur_level = maybe_layer->layerId();
7934   if (!isBatchedAtLevel(self, cur_level)) {
7935     return at::_ops::mode_dimname::call(self, dim, keepdim);
7936   }
7937   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
7938   auto results = batch_rule(self_value, self_bdim, dim, keepdim);
7939   return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
7940 }
7941 template <typename batch_rule_t, batch_rule_t batch_rule>
7942 at::Tensor mul_Tensor_generated_plumbing(const at::Tensor & self, const at::Tensor & other) {
7943   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
7944   auto maybe_layer = maybeCurrentDynamicLayer();
7945   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
7946   int64_t cur_level = maybe_layer->layerId();
7947   if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
7948     return at::_ops::mul_Tensor::call(self, other);
7949   }
7950   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
7951   auto [other_value, other_bdim] = unwrapTensorAtLevel(other, cur_level);
7952   auto results = batch_rule(self_value, self_bdim, other_value, other_bdim);
7953   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
7954 }
7955 template <typename batch_rule_t, batch_rule_t batch_rule>
7956 at::Tensor & mul__Tensor_generated_plumbing(at::Tensor & self, const at::Tensor & other) {
7957   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
7958   auto maybe_layer = maybeCurrentDynamicLayer();
7959   vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
7960   int64_t cur_level = maybe_layer->layerId();
7961   if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
7962     return at::_ops::mul__Tensor::call(self, other);
7963   }
7964   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
7965   auto [other_value, other_bdim] = unwrapTensorAtLevel(other, cur_level);
7966   batch_rule(self_value, self_bdim, other_value, other_bdim);
7967   return self;
7968 }
7969 template <typename batch_rule_t, batch_rule_t batch_rule>
7970 at::Tensor mul_Scalar_generated_plumbing(const at::Tensor & self, const at::Scalar & other) {
7971   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
7972   auto maybe_layer = maybeCurrentDynamicLayer();
7973   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
7974   int64_t cur_level = maybe_layer->layerId();
7975   if (!isBatchedAtLevel(self, cur_level)) {
7976     return at::_ops::mul_Scalar::call(self, other);
7977   }
7978   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
7979   auto results = batch_rule(self_value, self_bdim, other);
7980   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
7981 }
7982 template <typename batch_rule_t, batch_rule_t batch_rule>
7983 at::Tensor & mul__Scalar_generated_plumbing(at::Tensor & self, const at::Scalar & other) {
7984   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
7985   auto maybe_layer = maybeCurrentDynamicLayer();
7986   vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
7987   int64_t cur_level = maybe_layer->layerId();
7988   if (!isBatchedAtLevel(self, cur_level)) {
7989     return at::_ops::mul__Scalar::call(self, other);
7990   }
7991   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
7992   batch_rule(self_value, self_bdim, other);
7993   return self;
7994 }
7995 template <typename batch_rule_t, batch_rule_t batch_rule>
7996 at::Tensor multiply_Tensor_generated_plumbing(const at::Tensor & self, const at::Tensor & other) {
7997   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
7998   auto maybe_layer = maybeCurrentDynamicLayer();
7999   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
8000   int64_t cur_level = maybe_layer->layerId();
8001   if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
8002     return at::_ops::multiply_Tensor::call(self, other);
8003   }
8004   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
8005   auto [other_value, other_bdim] = unwrapTensorAtLevel(other, cur_level);
8006   auto results = batch_rule(self_value, self_bdim, other_value, other_bdim);
8007   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
8008 }
8009 template <typename batch_rule_t, batch_rule_t batch_rule>
8010 at::Tensor & multiply__Tensor_generated_plumbing(at::Tensor & self, const at::Tensor & other) {
8011   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
8012   auto maybe_layer = maybeCurrentDynamicLayer();
8013   vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
8014   int64_t cur_level = maybe_layer->layerId();
8015   if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
8016     return at::_ops::multiply__Tensor::call(self, other);
8017   }
8018   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
8019   auto [other_value, other_bdim] = unwrapTensorAtLevel(other, cur_level);
8020   batch_rule(self_value, self_bdim, other_value, other_bdim);
8021   return self;
8022 }
8023 template <typename batch_rule_t, batch_rule_t batch_rule>
8024 at::Tensor multiply_Scalar_generated_plumbing(const at::Tensor & self, const at::Scalar & other) {
8025   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
8026   auto maybe_layer = maybeCurrentDynamicLayer();
8027   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
8028   int64_t cur_level = maybe_layer->layerId();
8029   if (!isBatchedAtLevel(self, cur_level)) {
8030     return at::_ops::multiply_Scalar::call(self, other);
8031   }
8032   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
8033   auto results = batch_rule(self_value, self_bdim, other);
8034   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
8035 }
8036 template <typename batch_rule_t, batch_rule_t batch_rule>
8037 at::Tensor & multiply__Scalar_generated_plumbing(at::Tensor & self, const at::Scalar & other) {
8038   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
8039   auto maybe_layer = maybeCurrentDynamicLayer();
8040   vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
8041   int64_t cur_level = maybe_layer->layerId();
8042   if (!isBatchedAtLevel(self, cur_level)) {
8043     return at::_ops::multiply__Scalar::call(self, other);
8044   }
8045   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
8046   batch_rule(self_value, self_bdim, other);
8047   return self;
8048 }
8049 template <typename batch_rule_t, batch_rule_t batch_rule>
8050 at::Tensor mv_generated_plumbing(const at::Tensor & self, const at::Tensor & vec) {
8051   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
8052   auto maybe_layer = maybeCurrentDynamicLayer();
8053   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
8054   int64_t cur_level = maybe_layer->layerId();
8055   if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(vec, cur_level)) {
8056     return at::_ops::mv::call(self, vec);
8057   }
8058   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
8059   auto [vec_value, vec_bdim] = unwrapTensorAtLevel(vec, cur_level);
8060   auto results = batch_rule(self_value, self_bdim, vec_value, vec_bdim);
8061   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
8062 }
8063 template <typename batch_rule_t, batch_rule_t batch_rule>
8064 at::Tensor mvlgamma_generated_plumbing(const at::Tensor & self, int64_t p) {
8065   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
8066   auto maybe_layer = maybeCurrentDynamicLayer();
8067   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
8068   int64_t cur_level = maybe_layer->layerId();
8069   if (!isBatchedAtLevel(self, cur_level)) {
8070     return at::_ops::mvlgamma::call(self, p);
8071   }
8072   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
8073   auto results = batch_rule(self_value, self_bdim, p);
8074   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
8075 }
8076 template <typename batch_rule_t, batch_rule_t batch_rule>
8077 at::Tensor & mvlgamma__generated_plumbing(at::Tensor & self, int64_t p) {
8078   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
8079   auto maybe_layer = maybeCurrentDynamicLayer();
8080   vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
8081   int64_t cur_level = maybe_layer->layerId();
8082   if (!isBatchedAtLevel(self, cur_level)) {
8083     return at::_ops::mvlgamma_::call(self, p);
8084   }
8085   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
8086   batch_rule(self_value, self_bdim, p);
8087   return self;
8088 }
8089 template <typename batch_rule_t, batch_rule_t batch_rule>
8090 at::Tensor narrow_copy_generated_plumbing(const at::Tensor & self, int64_t dim, c10::SymInt start, c10::SymInt length) {
8091   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
8092   auto maybe_layer = maybeCurrentDynamicLayer();
8093   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
8094   int64_t cur_level = maybe_layer->layerId();
8095   if (!isBatchedAtLevel(self, cur_level)) {
8096     return at::_ops::narrow_copy::call(self, dim, start, length);
8097   }
8098   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
8099   auto results = batch_rule(self_value, self_bdim, dim, start, length);
8100   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
8101 }
8102 template <typename batch_rule_t, batch_rule_t batch_rule>
8103 at::Tensor narrow_generated_plumbing(const at::Tensor & self, int64_t dim, c10::SymInt start, c10::SymInt length) {
8104   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
8105   auto maybe_layer = maybeCurrentDynamicLayer();
8106   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
8107   int64_t cur_level = maybe_layer->layerId();
8108   if (!isBatchedAtLevel(self, cur_level)) {
8109     return at::_ops::narrow::call(self, dim, start, length);
8110   }
8111   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
8112   auto results = batch_rule(self_value, self_bdim, dim, start, length);
8113   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
8114 }
8115 template <typename batch_rule_t, batch_rule_t batch_rule>
8116 at::Tensor narrow_Tensor_generated_plumbing(const at::Tensor & self, int64_t dim, const at::Tensor & start, c10::SymInt length) {
8117   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
8118   auto maybe_layer = maybeCurrentDynamicLayer();
8119   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
8120   int64_t cur_level = maybe_layer->layerId();
8121   if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(start, cur_level)) {
8122     return at::_ops::narrow_Tensor::call(self, dim, start, length);
8123   }
8124   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
8125   auto [start_value, start_bdim] = unwrapTensorAtLevel(start, cur_level);
8126   auto results = batch_rule(self_value, self_bdim, dim, start_value, start_bdim, length);
8127   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
8128 }
// vmap plumbing for native_batch_norm. Unwraps every tensor argument that is
// batched at the current vmap level (optional tensors only when present),
// invokes `batch_rule` on the unwrapped (value, bdim) pairs, and re-wraps the
// three result tensors as batched tensors at that level.
template <typename batch_rule_t, batch_rule_t batch_rule>
::std::tuple<at::Tensor,at::Tensor,at::Tensor> native_batch_norm_generated_plumbing(const at::Tensor & input, const ::std::optional<at::Tensor> & weight, const ::std::optional<at::Tensor> & bias, const ::std::optional<at::Tensor> & running_mean, const ::std::optional<at::Tensor> & running_var, bool training, double momentum, double eps) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  // Fast path: no argument is batched at this level, so call the op directly.
  if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(bias, cur_level) && !isBatchedAtLevel(running_mean, cur_level) && !isBatchedAtLevel(running_var, cur_level)) {
    return at::_ops::native_batch_norm::call(input, weight, bias, running_mean, running_var, training, momentum, eps);
  }
  auto [input_value, input_bdim] = unwrapTensorAtLevel(input, cur_level);
  // Optional tensors are unwrapped only when present; absent ones stay nullopt.
  std::optional<Tensor> weight_value;
  std::optional<int64_t> weight_bdim;
  if (weight) {
      std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight.value(), cur_level);
  }
  std::optional<Tensor> bias_value;
  std::optional<int64_t> bias_bdim;
  if (bias) {
      std::tie(bias_value, bias_bdim) = unwrapTensorAtLevel(bias.value(), cur_level);
  }
  std::optional<Tensor> running_mean_value;
  std::optional<int64_t> running_mean_bdim;
  if (running_mean) {
      std::tie(running_mean_value, running_mean_bdim) = unwrapTensorAtLevel(running_mean.value(), cur_level);
  }
  std::optional<Tensor> running_var_value;
  std::optional<int64_t> running_var_bdim;
  if (running_var) {
      std::tie(running_var_value, running_var_bdim) = unwrapTensorAtLevel(running_var.value(), cur_level);
  }
  auto results = batch_rule(input_value, input_bdim, weight_value, weight_bdim, bias_value, bias_bdim, running_mean_value, running_mean_bdim, running_var_value, running_var_bdim, training, momentum, eps);
  // Re-wrap each (value, bdim) pair of the result tuple at the current level.
  return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level));
}
// vmap plumbing for _native_batch_norm_legit_no_training. Unwraps the
// required and optional tensor arguments at the current vmap level, invokes
// `batch_rule`, and re-wraps the three result tensors at that level.
template <typename batch_rule_t, batch_rule_t batch_rule>
::std::tuple<at::Tensor,at::Tensor,at::Tensor> _native_batch_norm_legit_no_training_generated_plumbing(const at::Tensor & input, const ::std::optional<at::Tensor> & weight, const ::std::optional<at::Tensor> & bias, const at::Tensor & running_mean, const at::Tensor & running_var, double momentum, double eps) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  // Fast path: no argument is batched at this level, so call the op directly.
  if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(bias, cur_level) && !isBatchedAtLevel(running_mean, cur_level) && !isBatchedAtLevel(running_var, cur_level)) {
    return at::_ops::_native_batch_norm_legit_no_training::call(input, weight, bias, running_mean, running_var, momentum, eps);
  }
  auto [input_value, input_bdim] = unwrapTensorAtLevel(input, cur_level);
  auto [running_mean_value, running_mean_bdim] = unwrapTensorAtLevel(running_mean, cur_level);
  auto [running_var_value, running_var_bdim] = unwrapTensorAtLevel(running_var, cur_level);
  // Optional tensors are unwrapped only when present; absent ones stay nullopt.
  std::optional<Tensor> weight_value;
  std::optional<int64_t> weight_bdim;
  if (weight) {
      std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight.value(), cur_level);
  }
  std::optional<Tensor> bias_value;
  std::optional<int64_t> bias_bdim;
  if (bias) {
      std::tie(bias_value, bias_bdim) = unwrapTensorAtLevel(bias.value(), cur_level);
  }
  auto results = batch_rule(input_value, input_bdim, weight_value, weight_bdim, bias_value, bias_bdim, running_mean_value, running_mean_bdim, running_var_value, running_var_bdim, momentum, eps);
  // Re-wrap each (value, bdim) pair of the result tuple at the current level.
  return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level));
}
// vmap plumbing for _native_batch_norm_legit_no_stats. Unwraps input and the
// optional weight/bias at the current vmap level, invokes `batch_rule`, and
// re-wraps the three result tensors at that level.
template <typename batch_rule_t, batch_rule_t batch_rule>
::std::tuple<at::Tensor,at::Tensor,at::Tensor> _native_batch_norm_legit_no_stats_generated_plumbing(const at::Tensor & input, const ::std::optional<at::Tensor> & weight, const ::std::optional<at::Tensor> & bias, bool training, double momentum, double eps) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  // Fast path: no argument is batched at this level, so call the op directly.
  if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(bias, cur_level)) {
    return at::_ops::_native_batch_norm_legit_no_stats::call(input, weight, bias, training, momentum, eps);
  }
  auto [input_value, input_bdim] = unwrapTensorAtLevel(input, cur_level);
  // Optional tensors are unwrapped only when present; absent ones stay nullopt.
  std::optional<Tensor> weight_value;
  std::optional<int64_t> weight_bdim;
  if (weight) {
      std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight.value(), cur_level);
  }
  std::optional<Tensor> bias_value;
  std::optional<int64_t> bias_bdim;
  if (bias) {
      std::tie(bias_value, bias_bdim) = unwrapTensorAtLevel(bias.value(), cur_level);
  }
  auto results = batch_rule(input_value, input_bdim, weight_value, weight_bdim, bias_value, bias_bdim, training, momentum, eps);
  // Re-wrap each (value, bdim) pair of the result tuple at the current level.
  return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level));
}
8210 template <typename batch_rule_t, batch_rule_t batch_rule>
8211 ::std::tuple<at::Tensor,at::Tensor> batch_norm_stats_generated_plumbing(const at::Tensor & input, double eps) {
8212   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
8213   auto maybe_layer = maybeCurrentDynamicLayer();
8214   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
8215   int64_t cur_level = maybe_layer->layerId();
8216   if (!isBatchedAtLevel(input, cur_level)) {
8217     return at::_ops::batch_norm_stats::call(input, eps);
8218   }
8219   auto [input_value, input_bdim] = unwrapTensorAtLevel(input, cur_level);
8220   auto results = batch_rule(input_value, input_bdim, eps);
8221   return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
8222 }
// vmap plumbing for batch_norm_elemt. Unwraps the required tensors and the
// optional weight/bias at the current vmap level, invokes `batch_rule`, and
// re-wraps the single result tensor at that level.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor batch_norm_elemt_generated_plumbing(const at::Tensor & input, const ::std::optional<at::Tensor> & weight, const ::std::optional<at::Tensor> & bias, const at::Tensor & mean, const at::Tensor & invstd, double eps) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  // Fast path: no argument is batched at this level, so call the op directly.
  if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(bias, cur_level) && !isBatchedAtLevel(mean, cur_level) && !isBatchedAtLevel(invstd, cur_level)) {
    return at::_ops::batch_norm_elemt::call(input, weight, bias, mean, invstd, eps);
  }
  auto [input_value, input_bdim] = unwrapTensorAtLevel(input, cur_level);
  auto [mean_value, mean_bdim] = unwrapTensorAtLevel(mean, cur_level);
  auto [invstd_value, invstd_bdim] = unwrapTensorAtLevel(invstd, cur_level);
  // Optional tensors are unwrapped only when present; absent ones stay nullopt.
  std::optional<Tensor> weight_value;
  std::optional<int64_t> weight_bdim;
  if (weight) {
      std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight.value(), cur_level);
  }
  std::optional<Tensor> bias_value;
  std::optional<int64_t> bias_bdim;
  if (bias) {
      std::tie(bias_value, bias_bdim) = unwrapTensorAtLevel(bias.value(), cur_level);
  }
  auto results = batch_rule(input_value, input_bdim, weight_value, weight_bdim, bias_value, bias_bdim, mean_value, mean_bdim, invstd_value, invstd_bdim, eps);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// vmap plumbing for batch_norm_gather_stats. Unwraps the required tensors and
// the optional running stats at the current vmap level, invokes `batch_rule`,
// and re-wraps both result tensors at that level.
template <typename batch_rule_t, batch_rule_t batch_rule>
::std::tuple<at::Tensor,at::Tensor> batch_norm_gather_stats_generated_plumbing(const at::Tensor & input, const at::Tensor & mean, const at::Tensor & invstd, const ::std::optional<at::Tensor> & running_mean, const ::std::optional<at::Tensor> & running_var, double momentum, double eps, int64_t count) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  // Fast path: no argument is batched at this level, so call the op directly.
  if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(mean, cur_level) && !isBatchedAtLevel(invstd, cur_level) && !isBatchedAtLevel(running_mean, cur_level) && !isBatchedAtLevel(running_var, cur_level)) {
    return at::_ops::batch_norm_gather_stats::call(input, mean, invstd, running_mean, running_var, momentum, eps, count);
  }
  auto [input_value, input_bdim] = unwrapTensorAtLevel(input, cur_level);
  auto [mean_value, mean_bdim] = unwrapTensorAtLevel(mean, cur_level);
  auto [invstd_value, invstd_bdim] = unwrapTensorAtLevel(invstd, cur_level);
  // Optional tensors are unwrapped only when present; absent ones stay nullopt.
  std::optional<Tensor> running_mean_value;
  std::optional<int64_t> running_mean_bdim;
  if (running_mean) {
      std::tie(running_mean_value, running_mean_bdim) = unwrapTensorAtLevel(running_mean.value(), cur_level);
  }
  std::optional<Tensor> running_var_value;
  std::optional<int64_t> running_var_bdim;
  if (running_var) {
      std::tie(running_var_value, running_var_bdim) = unwrapTensorAtLevel(running_var.value(), cur_level);
  }
  auto results = batch_rule(input_value, input_bdim, mean_value, mean_bdim, invstd_value, invstd_bdim, running_mean_value, running_mean_bdim, running_var_value, running_var_bdim, momentum, eps, count);
  // Re-wrap each (value, bdim) pair of the result tuple at the current level.
  return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
}
// vmap plumbing for batch_norm_gather_stats_with_counts. Unwraps the required
// tensors (including `counts`) and the optional running stats at the current
// vmap level, invokes `batch_rule`, and re-wraps both result tensors.
template <typename batch_rule_t, batch_rule_t batch_rule>
::std::tuple<at::Tensor,at::Tensor> batch_norm_gather_stats_with_counts_generated_plumbing(const at::Tensor & input, const at::Tensor & mean, const at::Tensor & invstd, const ::std::optional<at::Tensor> & running_mean, const ::std::optional<at::Tensor> & running_var, double momentum, double eps, const at::Tensor & counts) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  // Fast path: no argument is batched at this level, so call the op directly.
  if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(mean, cur_level) && !isBatchedAtLevel(invstd, cur_level) && !isBatchedAtLevel(running_mean, cur_level) && !isBatchedAtLevel(running_var, cur_level) && !isBatchedAtLevel(counts, cur_level)) {
    return at::_ops::batch_norm_gather_stats_with_counts::call(input, mean, invstd, running_mean, running_var, momentum, eps, counts);
  }
  auto [input_value, input_bdim] = unwrapTensorAtLevel(input, cur_level);
  auto [mean_value, mean_bdim] = unwrapTensorAtLevel(mean, cur_level);
  auto [invstd_value, invstd_bdim] = unwrapTensorAtLevel(invstd, cur_level);
  auto [counts_value, counts_bdim] = unwrapTensorAtLevel(counts, cur_level);
  // Optional tensors are unwrapped only when present; absent ones stay nullopt.
  std::optional<Tensor> running_mean_value;
  std::optional<int64_t> running_mean_bdim;
  if (running_mean) {
      std::tie(running_mean_value, running_mean_bdim) = unwrapTensorAtLevel(running_mean.value(), cur_level);
  }
  std::optional<Tensor> running_var_value;
  std::optional<int64_t> running_var_bdim;
  if (running_var) {
      std::tie(running_var_value, running_var_bdim) = unwrapTensorAtLevel(running_var.value(), cur_level);
  }
  auto results = batch_rule(input_value, input_bdim, mean_value, mean_bdim, invstd_value, invstd_bdim, running_mean_value, running_mean_bdim, running_var_value, running_var_bdim, momentum, eps, counts_value, counts_bdim);
  // Re-wrap each (value, bdim) pair of the result tuple at the current level.
  return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
}
// vmap plumbing for native_batch_norm_backward. Unwraps grad_out/input and
// the five optional tensors at the current vmap level, invokes `batch_rule`,
// and re-wraps the three result tensors at that level.
template <typename batch_rule_t, batch_rule_t batch_rule>
::std::tuple<at::Tensor,at::Tensor,at::Tensor> native_batch_norm_backward_generated_plumbing(const at::Tensor & grad_out, const at::Tensor & input, const ::std::optional<at::Tensor> & weight, const ::std::optional<at::Tensor> & running_mean, const ::std::optional<at::Tensor> & running_var, const ::std::optional<at::Tensor> & save_mean, const ::std::optional<at::Tensor> & save_invstd, bool train, double eps, ::std::array<bool,3> output_mask) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  // Fast path: no argument is batched at this level, so call the op directly.
  if (!isBatchedAtLevel(grad_out, cur_level) && !isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(running_mean, cur_level) && !isBatchedAtLevel(running_var, cur_level) && !isBatchedAtLevel(save_mean, cur_level) && !isBatchedAtLevel(save_invstd, cur_level)) {
    return at::_ops::native_batch_norm_backward::call(grad_out, input, weight, running_mean, running_var, save_mean, save_invstd, train, eps, output_mask);
  }
  auto [grad_out_value, grad_out_bdim] = unwrapTensorAtLevel(grad_out, cur_level);
  auto [input_value, input_bdim] = unwrapTensorAtLevel(input, cur_level);
  // Optional tensors are unwrapped only when present; absent ones stay nullopt.
  std::optional<Tensor> weight_value;
  std::optional<int64_t> weight_bdim;
  if (weight) {
      std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight.value(), cur_level);
  }
  std::optional<Tensor> running_mean_value;
  std::optional<int64_t> running_mean_bdim;
  if (running_mean) {
      std::tie(running_mean_value, running_mean_bdim) = unwrapTensorAtLevel(running_mean.value(), cur_level);
  }
  std::optional<Tensor> running_var_value;
  std::optional<int64_t> running_var_bdim;
  if (running_var) {
      std::tie(running_var_value, running_var_bdim) = unwrapTensorAtLevel(running_var.value(), cur_level);
  }
  std::optional<Tensor> save_mean_value;
  std::optional<int64_t> save_mean_bdim;
  if (save_mean) {
      std::tie(save_mean_value, save_mean_bdim) = unwrapTensorAtLevel(save_mean.value(), cur_level);
  }
  std::optional<Tensor> save_invstd_value;
  std::optional<int64_t> save_invstd_bdim;
  if (save_invstd) {
      std::tie(save_invstd_value, save_invstd_bdim) = unwrapTensorAtLevel(save_invstd.value(), cur_level);
  }
  auto results = batch_rule(grad_out_value, grad_out_bdim, input_value, input_bdim, weight_value, weight_bdim, running_mean_value, running_mean_bdim, running_var_value, running_var_bdim, save_mean_value, save_mean_bdim, save_invstd_value, save_invstd_bdim, train, eps, output_mask);
  // Re-wrap each (value, bdim) pair of the result tuple at the current level.
  return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level));
}
// vmap plumbing for batch_norm_backward_reduce. Unwraps the required tensors
// and the optional weight at the current vmap level, invokes `batch_rule`,
// and re-wraps the four result tensors at that level.
template <typename batch_rule_t, batch_rule_t batch_rule>
::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor> batch_norm_backward_reduce_generated_plumbing(const at::Tensor & grad_out, const at::Tensor & input, const at::Tensor & mean, const at::Tensor & invstd, const ::std::optional<at::Tensor> & weight, bool input_g, bool weight_g, bool bias_g) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  // Fast path: no argument is batched at this level, so call the op directly.
  if (!isBatchedAtLevel(grad_out, cur_level) && !isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(mean, cur_level) && !isBatchedAtLevel(invstd, cur_level) && !isBatchedAtLevel(weight, cur_level)) {
    return at::_ops::batch_norm_backward_reduce::call(grad_out, input, mean, invstd, weight, input_g, weight_g, bias_g);
  }
  auto [grad_out_value, grad_out_bdim] = unwrapTensorAtLevel(grad_out, cur_level);
  auto [input_value, input_bdim] = unwrapTensorAtLevel(input, cur_level);
  auto [mean_value, mean_bdim] = unwrapTensorAtLevel(mean, cur_level);
  auto [invstd_value, invstd_bdim] = unwrapTensorAtLevel(invstd, cur_level);
  // Optional weight is unwrapped only when present; absent stays nullopt.
  std::optional<Tensor> weight_value;
  std::optional<int64_t> weight_bdim;
  if (weight) {
      std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight.value(), cur_level);
  }
  auto results = batch_rule(grad_out_value, grad_out_bdim, input_value, input_bdim, mean_value, mean_bdim, invstd_value, invstd_bdim, weight_value, weight_bdim, input_g, weight_g, bias_g);
  // Re-wrap each (value, bdim) pair of the result tuple at the current level.
  return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level), makeBatched(std::get<6>(results), std::get<7>(results), cur_level));
}
// vmap plumbing for batch_norm_backward_elemt. Unwraps the required tensors
// and the optional weight at the current vmap level, invokes `batch_rule`,
// and re-wraps the single result tensor at that level.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor batch_norm_backward_elemt_generated_plumbing(const at::Tensor & grad_out, const at::Tensor & input, const at::Tensor & mean, const at::Tensor & invstd, const ::std::optional<at::Tensor> & weight, const at::Tensor & sum_dy, const at::Tensor & sum_dy_xmu, const at::Tensor & count) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  // Fast path: no argument is batched at this level, so call the op directly.
  if (!isBatchedAtLevel(grad_out, cur_level) && !isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(mean, cur_level) && !isBatchedAtLevel(invstd, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(sum_dy, cur_level) && !isBatchedAtLevel(sum_dy_xmu, cur_level) && !isBatchedAtLevel(count, cur_level)) {
    return at::_ops::batch_norm_backward_elemt::call(grad_out, input, mean, invstd, weight, sum_dy, sum_dy_xmu, count);
  }
  auto [grad_out_value, grad_out_bdim] = unwrapTensorAtLevel(grad_out, cur_level);
  auto [input_value, input_bdim] = unwrapTensorAtLevel(input, cur_level);
  auto [mean_value, mean_bdim] = unwrapTensorAtLevel(mean, cur_level);
  auto [invstd_value, invstd_bdim] = unwrapTensorAtLevel(invstd, cur_level);
  auto [sum_dy_value, sum_dy_bdim] = unwrapTensorAtLevel(sum_dy, cur_level);
  auto [sum_dy_xmu_value, sum_dy_xmu_bdim] = unwrapTensorAtLevel(sum_dy_xmu, cur_level);
  auto [count_value, count_bdim] = unwrapTensorAtLevel(count, cur_level);
  // Optional weight is unwrapped only when present; absent stays nullopt.
  std::optional<Tensor> weight_value;
  std::optional<int64_t> weight_bdim;
  if (weight) {
      std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight.value(), cur_level);
  }
  auto results = batch_rule(grad_out_value, grad_out_bdim, input_value, input_bdim, mean_value, mean_bdim, invstd_value, invstd_bdim, weight_value, weight_bdim, sum_dy_value, sum_dy_bdim, sum_dy_xmu_value, sum_dy_xmu_bdim, count_value, count_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// vmap plumbing for batch_norm_update_stats. Unwraps input and the optional
// running stats at the current vmap level, invokes `batch_rule`, and re-wraps
// both result tensors at that level.
template <typename batch_rule_t, batch_rule_t batch_rule>
::std::tuple<at::Tensor,at::Tensor> batch_norm_update_stats_generated_plumbing(const at::Tensor & input, const ::std::optional<at::Tensor> & running_mean, const ::std::optional<at::Tensor> & running_var, double momentum) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  // Fast path: no argument is batched at this level, so call the op directly.
  if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(running_mean, cur_level) && !isBatchedAtLevel(running_var, cur_level)) {
    return at::_ops::batch_norm_update_stats::call(input, running_mean, running_var, momentum);
  }
  auto [input_value, input_bdim] = unwrapTensorAtLevel(input, cur_level);
  // Optional tensors are unwrapped only when present; absent ones stay nullopt.
  std::optional<Tensor> running_mean_value;
  std::optional<int64_t> running_mean_bdim;
  if (running_mean) {
      std::tie(running_mean_value, running_mean_bdim) = unwrapTensorAtLevel(running_mean.value(), cur_level);
  }
  std::optional<Tensor> running_var_value;
  std::optional<int64_t> running_var_bdim;
  if (running_var) {
      std::tie(running_var_value, running_var_bdim) = unwrapTensorAtLevel(running_var.value(), cur_level);
  }
  auto results = batch_rule(input_value, input_bdim, running_mean_value, running_mean_bdim, running_var_value, running_var_bdim, momentum);
  // Re-wrap each (value, bdim) pair of the result tuple at the current level.
  return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
}
// vmap plumbing for _nnpack_spatial_convolution. Unwraps input/weight and the
// optional bias at the current vmap level, invokes `batch_rule`, and re-wraps
// the single result tensor at that level.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor _nnpack_spatial_convolution_generated_plumbing(const at::Tensor & input, const at::Tensor & weight, const ::std::optional<at::Tensor> & bias, c10::SymIntArrayRef padding, c10::SymIntArrayRef stride) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  // Fast path: no argument is batched at this level, so call the op directly.
  if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(bias, cur_level)) {
    return at::_ops::_nnpack_spatial_convolution::call(input, weight, bias, padding, stride);
  }
  auto [input_value, input_bdim] = unwrapTensorAtLevel(input, cur_level);
  auto [weight_value, weight_bdim] = unwrapTensorAtLevel(weight, cur_level);
  // Optional bias is unwrapped only when present; absent stays nullopt.
  std::optional<Tensor> bias_value;
  std::optional<int64_t> bias_bdim;
  if (bias) {
      std::tie(bias_value, bias_bdim) = unwrapTensorAtLevel(bias.value(), cur_level);
  }
  auto results = batch_rule(input_value, input_bdim, weight_value, weight_bdim, bias_value, bias_bdim, padding, stride);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
8425 template <typename batch_rule_t, batch_rule_t batch_rule>
8426 at::Tensor ones_like_generated_plumbing(const at::Tensor & self, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory, ::std::optional<at::MemoryFormat> memory_format) {
8427   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
8428   auto maybe_layer = maybeCurrentDynamicLayer();
8429   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
8430   int64_t cur_level = maybe_layer->layerId();
8431   if (!isBatchedAtLevel(self, cur_level)) {
8432     return at::_ops::ones_like::call(self, dtype, layout, device, pin_memory, memory_format);
8433   }
8434   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
8435   auto results = batch_rule(self_value, self_bdim, dtype, layout, device, pin_memory, memory_format);
8436   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
8437 }
8438 template <typename batch_rule_t, batch_rule_t batch_rule>
8439 at::Tensor pairwise_distance_generated_plumbing(const at::Tensor & x1, const at::Tensor & x2, double p, double eps, bool keepdim) {
8440   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
8441   auto maybe_layer = maybeCurrentDynamicLayer();
8442   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
8443   int64_t cur_level = maybe_layer->layerId();
8444   if (!isBatchedAtLevel(x1, cur_level) && !isBatchedAtLevel(x2, cur_level)) {
8445     return at::_ops::pairwise_distance::call(x1, x2, p, eps, keepdim);
8446   }
8447   auto [x1_value, x1_bdim] = unwrapTensorAtLevel(x1, cur_level);
8448   auto [x2_value, x2_bdim] = unwrapTensorAtLevel(x2, cur_level);
8449   auto results = batch_rule(x1_value, x1_bdim, x2_value, x2_bdim, p, eps, keepdim);
8450   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
8451 }
8452 template <typename batch_rule_t, batch_rule_t batch_rule>
8453 at::Tensor cdist_generated_plumbing(const at::Tensor & x1, const at::Tensor & x2, double p, ::std::optional<int64_t> compute_mode) {
8454   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
8455   auto maybe_layer = maybeCurrentDynamicLayer();
8456   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
8457   int64_t cur_level = maybe_layer->layerId();
8458   if (!isBatchedAtLevel(x1, cur_level) && !isBatchedAtLevel(x2, cur_level)) {
8459     return at::_ops::cdist::call(x1, x2, p, compute_mode);
8460   }
8461   auto [x1_value, x1_bdim] = unwrapTensorAtLevel(x1, cur_level);
8462   auto [x2_value, x2_bdim] = unwrapTensorAtLevel(x2, cur_level);
8463   auto results = batch_rule(x1_value, x1_bdim, x2_value, x2_bdim, p, compute_mode);
8464   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
8465 }
8466 template <typename batch_rule_t, batch_rule_t batch_rule>
8467 at::Tensor _euclidean_dist_generated_plumbing(const at::Tensor & x1, const at::Tensor & x2) {
8468   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
8469   auto maybe_layer = maybeCurrentDynamicLayer();
8470   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
8471   int64_t cur_level = maybe_layer->layerId();
8472   if (!isBatchedAtLevel(x1, cur_level) && !isBatchedAtLevel(x2, cur_level)) {
8473     return at::_ops::_euclidean_dist::call(x1, x2);
8474   }
8475   auto [x1_value, x1_bdim] = unwrapTensorAtLevel(x1, cur_level);
8476   auto [x2_value, x2_bdim] = unwrapTensorAtLevel(x2, cur_level);
8477   auto results = batch_rule(x1_value, x1_bdim, x2_value, x2_bdim);
8478   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
8479 }
8480 template <typename batch_rule_t, batch_rule_t batch_rule>
8481 at::Tensor _cdist_forward_generated_plumbing(const at::Tensor & x1, const at::Tensor & x2, double p, ::std::optional<int64_t> compute_mode) {
8482   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
8483   auto maybe_layer = maybeCurrentDynamicLayer();
8484   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
8485   int64_t cur_level = maybe_layer->layerId();
8486   if (!isBatchedAtLevel(x1, cur_level) && !isBatchedAtLevel(x2, cur_level)) {
8487     return at::_ops::_cdist_forward::call(x1, x2, p, compute_mode);
8488   }
8489   auto [x1_value, x1_bdim] = unwrapTensorAtLevel(x1, cur_level);
8490   auto [x2_value, x2_bdim] = unwrapTensorAtLevel(x2, cur_level);
8491   auto results = batch_rule(x1_value, x1_bdim, x2_value, x2_bdim, p, compute_mode);
8492   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
8493 }
8494 template <typename batch_rule_t, batch_rule_t batch_rule>
8495 at::Tensor _cdist_backward_generated_plumbing(const at::Tensor & grad, const at::Tensor & x1, const at::Tensor & x2, double p, const at::Tensor & cdist) {
8496   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
8497   auto maybe_layer = maybeCurrentDynamicLayer();
8498   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
8499   int64_t cur_level = maybe_layer->layerId();
8500   if (!isBatchedAtLevel(grad, cur_level) && !isBatchedAtLevel(x1, cur_level) && !isBatchedAtLevel(x2, cur_level) && !isBatchedAtLevel(cdist, cur_level)) {
8501     return at::_ops::_cdist_backward::call(grad, x1, x2, p, cdist);
8502   }
8503   auto [grad_value, grad_bdim] = unwrapTensorAtLevel(grad, cur_level);
8504   auto [x1_value, x1_bdim] = unwrapTensorAtLevel(x1, cur_level);
8505   auto [x2_value, x2_bdim] = unwrapTensorAtLevel(x2, cur_level);
8506   auto [cdist_value, cdist_bdim] = unwrapTensorAtLevel(cdist, cur_level);
8507   auto results = batch_rule(grad_value, grad_bdim, x1_value, x1_bdim, x2_value, x2_bdim, p, cdist_value, cdist_bdim);
8508   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
8509 }
8510 template <typename batch_rule_t, batch_rule_t batch_rule>
8511 at::Tensor pdist_generated_plumbing(const at::Tensor & self, double p) {
8512   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
8513   auto maybe_layer = maybeCurrentDynamicLayer();
8514   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
8515   int64_t cur_level = maybe_layer->layerId();
8516   if (!isBatchedAtLevel(self, cur_level)) {
8517     return at::_ops::pdist::call(self, p);
8518   }
8519   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
8520   auto results = batch_rule(self_value, self_bdim, p);
8521   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
8522 }
8523 template <typename batch_rule_t, batch_rule_t batch_rule>
8524 at::Tensor _pdist_forward_generated_plumbing(const at::Tensor & self, double p) {
8525   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
8526   auto maybe_layer = maybeCurrentDynamicLayer();
8527   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
8528   int64_t cur_level = maybe_layer->layerId();
8529   if (!isBatchedAtLevel(self, cur_level)) {
8530     return at::_ops::_pdist_forward::call(self, p);
8531   }
8532   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
8533   auto results = batch_rule(self_value, self_bdim, p);
8534   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
8535 }
8536 template <typename batch_rule_t, batch_rule_t batch_rule>
8537 at::Tensor _pdist_backward_generated_plumbing(const at::Tensor & grad, const at::Tensor & self, double p, const at::Tensor & pdist) {
8538   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
8539   auto maybe_layer = maybeCurrentDynamicLayer();
8540   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
8541   int64_t cur_level = maybe_layer->layerId();
8542   if (!isBatchedAtLevel(grad, cur_level) && !isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(pdist, cur_level)) {
8543     return at::_ops::_pdist_backward::call(grad, self, p, pdist);
8544   }
8545   auto [grad_value, grad_bdim] = unwrapTensorAtLevel(grad, cur_level);
8546   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
8547   auto [pdist_value, pdist_bdim] = unwrapTensorAtLevel(pdist, cur_level);
8548   auto results = batch_rule(grad_value, grad_bdim, self_value, self_bdim, p, pdist_value, pdist_bdim);
8549   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
8550 }
8551 template <typename batch_rule_t, batch_rule_t batch_rule>
8552 at::Tensor cosine_similarity_generated_plumbing(const at::Tensor & x1, const at::Tensor & x2, int64_t dim, double eps) {
8553   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
8554   auto maybe_layer = maybeCurrentDynamicLayer();
8555   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
8556   int64_t cur_level = maybe_layer->layerId();
8557   if (!isBatchedAtLevel(x1, cur_level) && !isBatchedAtLevel(x2, cur_level)) {
8558     return at::_ops::cosine_similarity::call(x1, x2, dim, eps);
8559   }
8560   auto [x1_value, x1_bdim] = unwrapTensorAtLevel(x1, cur_level);
8561   auto [x2_value, x2_bdim] = unwrapTensorAtLevel(x2, cur_level);
8562   auto results = batch_rule(x1_value, x1_bdim, x2_value, x2_bdim, dim, eps);
8563   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
8564 }
8565 template <typename batch_rule_t, batch_rule_t batch_rule>
8566 at::Tensor permute_generated_plumbing(const at::Tensor & self, at::IntArrayRef dims) {
8567   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
8568   auto maybe_layer = maybeCurrentDynamicLayer();
8569   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
8570   int64_t cur_level = maybe_layer->layerId();
8571   if (!isBatchedAtLevel(self, cur_level)) {
8572     return at::_ops::permute::call(self, dims);
8573   }
8574   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
8575   auto results = batch_rule(self_value, self_bdim, dims);
8576   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
8577 }
8578 template <typename batch_rule_t, batch_rule_t batch_rule>
8579 at::Tensor movedim_intlist_generated_plumbing(const at::Tensor & self, at::IntArrayRef source, at::IntArrayRef destination) {
8580   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
8581   auto maybe_layer = maybeCurrentDynamicLayer();
8582   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
8583   int64_t cur_level = maybe_layer->layerId();
8584   if (!isBatchedAtLevel(self, cur_level)) {
8585     return at::_ops::movedim_intlist::call(self, source, destination);
8586   }
8587   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
8588   auto results = batch_rule(self_value, self_bdim, source, destination);
8589   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
8590 }
8591 template <typename batch_rule_t, batch_rule_t batch_rule>
8592 at::Tensor movedim_int_generated_plumbing(const at::Tensor & self, int64_t source, int64_t destination) {
8593   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
8594   auto maybe_layer = maybeCurrentDynamicLayer();
8595   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
8596   int64_t cur_level = maybe_layer->layerId();
8597   if (!isBatchedAtLevel(self, cur_level)) {
8598     return at::_ops::movedim_int::call(self, source, destination);
8599   }
8600   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
8601   auto results = batch_rule(self_value, self_bdim, source, destination);
8602   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
8603 }
8604 template <typename batch_rule_t, batch_rule_t batch_rule>
8605 at::Tensor moveaxis_intlist_generated_plumbing(const at::Tensor & self, at::IntArrayRef source, at::IntArrayRef destination) {
8606   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
8607   auto maybe_layer = maybeCurrentDynamicLayer();
8608   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
8609   int64_t cur_level = maybe_layer->layerId();
8610   if (!isBatchedAtLevel(self, cur_level)) {
8611     return at::_ops::moveaxis_intlist::call(self, source, destination);
8612   }
8613   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
8614   auto results = batch_rule(self_value, self_bdim, source, destination);
8615   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
8616 }
8617 template <typename batch_rule_t, batch_rule_t batch_rule>
8618 at::Tensor moveaxis_int_generated_plumbing(const at::Tensor & self, int64_t source, int64_t destination) {
8619   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
8620   auto maybe_layer = maybeCurrentDynamicLayer();
8621   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
8622   int64_t cur_level = maybe_layer->layerId();
8623   if (!isBatchedAtLevel(self, cur_level)) {
8624     return at::_ops::moveaxis_int::call(self, source, destination);
8625   }
8626   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
8627   auto results = batch_rule(self_value, self_bdim, source, destination);
8628   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
8629 }
8630 template <typename batch_rule_t, batch_rule_t batch_rule>
8631 at::Tensor numpy_T_generated_plumbing(const at::Tensor & self) {
8632   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
8633   auto maybe_layer = maybeCurrentDynamicLayer();
8634   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
8635   int64_t cur_level = maybe_layer->layerId();
8636   if (!isBatchedAtLevel(self, cur_level)) {
8637     return at::_ops::numpy_T::call(self);
8638   }
8639   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
8640   auto results = batch_rule(self_value, self_bdim);
8641   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
8642 }
8643 template <typename batch_rule_t, batch_rule_t batch_rule>
8644 at::Tensor matrix_H_generated_plumbing(const at::Tensor & self) {
8645   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
8646   auto maybe_layer = maybeCurrentDynamicLayer();
8647   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
8648   int64_t cur_level = maybe_layer->layerId();
8649   if (!isBatchedAtLevel(self, cur_level)) {
8650     return at::_ops::matrix_H::call(self);
8651   }
8652   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
8653   auto results = batch_rule(self_value, self_bdim);
8654   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
8655 }
8656 template <typename batch_rule_t, batch_rule_t batch_rule>
8657 at::Tensor mT_generated_plumbing(const at::Tensor & self) {
8658   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
8659   auto maybe_layer = maybeCurrentDynamicLayer();
8660   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
8661   int64_t cur_level = maybe_layer->layerId();
8662   if (!isBatchedAtLevel(self, cur_level)) {
8663     return at::_ops::mT::call(self);
8664   }
8665   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
8666   auto results = batch_rule(self_value, self_bdim);
8667   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
8668 }
8669 template <typename batch_rule_t, batch_rule_t batch_rule>
8670 at::Tensor mH_generated_plumbing(const at::Tensor & self) {
8671   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
8672   auto maybe_layer = maybeCurrentDynamicLayer();
8673   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
8674   int64_t cur_level = maybe_layer->layerId();
8675   if (!isBatchedAtLevel(self, cur_level)) {
8676     return at::_ops::mH::call(self);
8677   }
8678   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
8679   auto results = batch_rule(self_value, self_bdim);
8680   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
8681 }
8682 template <typename batch_rule_t, batch_rule_t batch_rule>
8683 at::Tensor adjoint_generated_plumbing(const at::Tensor & self) {
8684   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
8685   auto maybe_layer = maybeCurrentDynamicLayer();
8686   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
8687   int64_t cur_level = maybe_layer->layerId();
8688   if (!isBatchedAtLevel(self, cur_level)) {
8689     return at::_ops::adjoint::call(self);
8690   }
8691   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
8692   auto results = batch_rule(self_value, self_bdim);
8693   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
8694 }
8695 template <typename batch_rule_t, batch_rule_t batch_rule>
8696 at::Tensor pixel_shuffle_generated_plumbing(const at::Tensor & self, int64_t upscale_factor) {
8697   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
8698   auto maybe_layer = maybeCurrentDynamicLayer();
8699   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
8700   int64_t cur_level = maybe_layer->layerId();
8701   if (!isBatchedAtLevel(self, cur_level)) {
8702     return at::_ops::pixel_shuffle::call(self, upscale_factor);
8703   }
8704   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
8705   auto results = batch_rule(self_value, self_bdim, upscale_factor);
8706   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
8707 }
8708 template <typename batch_rule_t, batch_rule_t batch_rule>
8709 at::Tensor pixel_unshuffle_generated_plumbing(const at::Tensor & self, int64_t downscale_factor) {
8710   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
8711   auto maybe_layer = maybeCurrentDynamicLayer();
8712   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
8713   int64_t cur_level = maybe_layer->layerId();
8714   if (!isBatchedAtLevel(self, cur_level)) {
8715     return at::_ops::pixel_unshuffle::call(self, downscale_factor);
8716   }
8717   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
8718   auto results = batch_rule(self_value, self_bdim, downscale_factor);
8719   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
8720 }
8721 template <typename batch_rule_t, batch_rule_t batch_rule>
8722 at::Tensor channel_shuffle_generated_plumbing(const at::Tensor & self, c10::SymInt groups) {
8723   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
8724   auto maybe_layer = maybeCurrentDynamicLayer();
8725   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
8726   int64_t cur_level = maybe_layer->layerId();
8727   if (!isBatchedAtLevel(self, cur_level)) {
8728     return at::_ops::channel_shuffle::call(self, groups);
8729   }
8730   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
8731   auto results = batch_rule(self_value, self_bdim, groups);
8732   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
8733 }
8734 template <typename batch_rule_t, batch_rule_t batch_rule>
8735 at::Tensor native_channel_shuffle_generated_plumbing(const at::Tensor & self, c10::SymInt groups) {
8736   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
8737   auto maybe_layer = maybeCurrentDynamicLayer();
8738   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
8739   int64_t cur_level = maybe_layer->layerId();
8740   if (!isBatchedAtLevel(self, cur_level)) {
8741     return at::_ops::native_channel_shuffle::call(self, groups);
8742   }
8743   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
8744   auto results = batch_rule(self_value, self_bdim, groups);
8745   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
8746 }
8747 template <typename batch_rule_t, batch_rule_t batch_rule>
8748 at::Tensor pin_memory_generated_plumbing(const at::Tensor & self, ::std::optional<at::Device> device) {
8749   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
8750   auto maybe_layer = maybeCurrentDynamicLayer();
8751   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
8752   int64_t cur_level = maybe_layer->layerId();
8753   if (!isBatchedAtLevel(self, cur_level)) {
8754     return at::_ops::pin_memory::call(self, device);
8755   }
8756   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
8757   auto results = batch_rule(self_value, self_bdim, device);
8758   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
8759 }
8760 template <typename batch_rule_t, batch_rule_t batch_rule>
8761 at::Tensor _pin_memory_generated_plumbing(const at::Tensor & self, ::std::optional<at::Device> device) {
8762   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
8763   auto maybe_layer = maybeCurrentDynamicLayer();
8764   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
8765   int64_t cur_level = maybe_layer->layerId();
8766   if (!isBatchedAtLevel(self, cur_level)) {
8767     return at::_ops::_pin_memory::call(self, device);
8768   }
8769   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
8770   auto results = batch_rule(self_value, self_bdim, device);
8771   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
8772 }
8773 template <typename batch_rule_t, batch_rule_t batch_rule>
8774 at::Tensor pinverse_generated_plumbing(const at::Tensor & self, double rcond) {
8775   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
8776   auto maybe_layer = maybeCurrentDynamicLayer();
8777   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
8778   int64_t cur_level = maybe_layer->layerId();
8779   if (!isBatchedAtLevel(self, cur_level)) {
8780     return at::_ops::pinverse::call(self, rcond);
8781   }
8782   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
8783   auto results = batch_rule(self_value, self_bdim, rcond);
8784   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
8785 }
8786 template <typename batch_rule_t, batch_rule_t batch_rule>
8787 at::Tensor poisson_nll_loss_generated_plumbing(const at::Tensor & input, const at::Tensor & target, bool log_input, bool full, double eps, int64_t reduction) {
8788   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
8789   auto maybe_layer = maybeCurrentDynamicLayer();
8790   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
8791   int64_t cur_level = maybe_layer->layerId();
8792   if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(target, cur_level)) {
8793     return at::_ops::poisson_nll_loss::call(input, target, log_input, full, eps, reduction);
8794   }
8795   auto [input_value, input_bdim] = unwrapTensorAtLevel(input, cur_level);
8796   auto [target_value, target_bdim] = unwrapTensorAtLevel(target, cur_level);
8797   auto results = batch_rule(input_value, input_bdim, target_value, target_bdim, log_input, full, eps, reduction);
8798   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
8799 }
8800 template <typename batch_rule_t, batch_rule_t batch_rule>
8801 at::Tensor rad2deg_generated_plumbing(const at::Tensor & self) {
8802   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
8803   auto maybe_layer = maybeCurrentDynamicLayer();
8804   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
8805   int64_t cur_level = maybe_layer->layerId();
8806   if (!isBatchedAtLevel(self, cur_level)) {
8807     return at::_ops::rad2deg::call(self);
8808   }
8809   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
8810   auto results = batch_rule(self_value, self_bdim);
8811   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
8812 }
8813 template <typename batch_rule_t, batch_rule_t batch_rule>
8814 at::Tensor & rad2deg__generated_plumbing(at::Tensor & self) {
8815   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
8816   auto maybe_layer = maybeCurrentDynamicLayer();
8817   vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
8818   int64_t cur_level = maybe_layer->layerId();
8819   if (!isBatchedAtLevel(self, cur_level)) {
8820     return at::_ops::rad2deg_::call(self);
8821   }
8822   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
8823   batch_rule(self_value, self_bdim);
8824   return self;
8825 }
8826 template <typename batch_rule_t, batch_rule_t batch_rule>
8827 at::Tensor deg2rad_generated_plumbing(const at::Tensor & self) {
8828   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
8829   auto maybe_layer = maybeCurrentDynamicLayer();
8830   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
8831   int64_t cur_level = maybe_layer->layerId();
8832   if (!isBatchedAtLevel(self, cur_level)) {
8833     return at::_ops::deg2rad::call(self);
8834   }
8835   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
8836   auto results = batch_rule(self_value, self_bdim);
8837   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
8838 }
8839 template <typename batch_rule_t, batch_rule_t batch_rule>
8840 at::Tensor & deg2rad__generated_plumbing(at::Tensor & self) {
8841   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
8842   auto maybe_layer = maybeCurrentDynamicLayer();
8843   vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
8844   int64_t cur_level = maybe_layer->layerId();
8845   if (!isBatchedAtLevel(self, cur_level)) {
8846     return at::_ops::deg2rad_::call(self);
8847   }
8848   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
8849   batch_rule(self_value, self_bdim);
8850   return self;
8851 }
8852 template <typename batch_rule_t, batch_rule_t batch_rule>
8853 at::Tensor rand_like_generated_plumbing(const at::Tensor & self, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory, ::std::optional<at::MemoryFormat> memory_format) {
8854   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
8855   auto maybe_layer = maybeCurrentDynamicLayer();
8856   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
8857   int64_t cur_level = maybe_layer->layerId();
8858   if (!isBatchedAtLevel(self, cur_level)) {
8859     return at::_ops::rand_like::call(self, dtype, layout, device, pin_memory, memory_format);
8860   }
8861   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
8862   auto results = batch_rule(self_value, self_bdim, dtype, layout, device, pin_memory, memory_format);
8863   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
8864 }
8865 template <typename batch_rule_t, batch_rule_t batch_rule>
8866 at::Tensor randint_like_generated_plumbing(const at::Tensor & self, c10::SymInt high, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory, ::std::optional<at::MemoryFormat> memory_format) {
8867   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
8868   auto maybe_layer = maybeCurrentDynamicLayer();
8869   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
8870   int64_t cur_level = maybe_layer->layerId();
8871   if (!isBatchedAtLevel(self, cur_level)) {
8872     return at::_ops::randint_like::call(self, high, dtype, layout, device, pin_memory, memory_format);
8873   }
8874   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
8875   auto results = batch_rule(self_value, self_bdim, high, dtype, layout, device, pin_memory, memory_format);
8876   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
8877 }
8878 template <typename batch_rule_t, batch_rule_t batch_rule>
8879 at::Tensor randint_like_Tensor_generated_plumbing(const at::Tensor & self, const at::Tensor & high, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory, ::std::optional<at::MemoryFormat> memory_format) {
8880   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
8881   auto maybe_layer = maybeCurrentDynamicLayer();
8882   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
8883   int64_t cur_level = maybe_layer->layerId();
8884   if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(high, cur_level)) {
8885     return at::_ops::randint_like_Tensor::call(self, high, dtype, layout, device, pin_memory, memory_format);
8886   }
8887   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
8888   auto [high_value, high_bdim] = unwrapTensorAtLevel(high, cur_level);
8889   auto results = batch_rule(self_value, self_bdim, high_value, high_bdim, dtype, layout, device, pin_memory, memory_format);
8890   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
8891 }
8892 template <typename batch_rule_t, batch_rule_t batch_rule>
8893 at::Tensor randint_like_low_dtype_generated_plumbing(const at::Tensor & self, c10::SymInt low, c10::SymInt high, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory, ::std::optional<at::MemoryFormat> memory_format) {
8894   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
8895   auto maybe_layer = maybeCurrentDynamicLayer();
8896   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
8897   int64_t cur_level = maybe_layer->layerId();
8898   if (!isBatchedAtLevel(self, cur_level)) {
8899     return at::_ops::randint_like_low_dtype::call(self, low, high, dtype, layout, device, pin_memory, memory_format);
8900   }
8901   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
8902   auto results = batch_rule(self_value, self_bdim, low, high, dtype, layout, device, pin_memory, memory_format);
8903   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
8904 }
8905 template <typename batch_rule_t, batch_rule_t batch_rule>
8906 at::Tensor randn_like_generated_plumbing(const at::Tensor & self, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory, ::std::optional<at::MemoryFormat> memory_format) {
8907   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
8908   auto maybe_layer = maybeCurrentDynamicLayer();
8909   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
8910   int64_t cur_level = maybe_layer->layerId();
8911   if (!isBatchedAtLevel(self, cur_level)) {
8912     return at::_ops::randn_like::call(self, dtype, layout, device, pin_memory, memory_format);
8913   }
8914   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
8915   auto results = batch_rule(self_value, self_bdim, dtype, layout, device, pin_memory, memory_format);
8916   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
8917 }
8918 template <typename batch_rule_t, batch_rule_t batch_rule>
8919 at::Tensor ravel_generated_plumbing(const at::Tensor & self) {
8920   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
8921   auto maybe_layer = maybeCurrentDynamicLayer();
8922   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
8923   int64_t cur_level = maybe_layer->layerId();
8924   if (!isBatchedAtLevel(self, cur_level)) {
8925     return at::_ops::ravel::call(self);
8926   }
8927   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
8928   auto results = batch_rule(self_value, self_bdim);
8929   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
8930 }
// Generated vmap plumbing for aten::reciprocal: unwrap batched `self`, apply
// batch_rule, re-wrap; plain call when nothing is batched at this level.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor reciprocal_generated_plumbing(const at::Tensor & self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::reciprocal::call(self);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// Generated in-place vmap plumbing for aten::reciprocal_: batch_rule operates
// on the unwrapped value (mutating `self`'s storage in place); returns `self`.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor & reciprocal__generated_plumbing(at::Tensor & self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::reciprocal_::call(self);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  batch_rule(self_value, self_bdim);
  return self;
}
// Generated vmap plumbing for aten::neg: unwrap batched `self`, apply
// batch_rule, re-wrap; plain call when nothing is batched at this level.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor neg_generated_plumbing(const at::Tensor & self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::neg::call(self);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// Generated in-place vmap plumbing for aten::neg_: batch_rule mutates the
// unwrapped value; returns `self`.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor & neg__generated_plumbing(at::Tensor & self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::neg_::call(self);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  batch_rule(self_value, self_bdim);
  return self;
}
// Generated vmap plumbing for aten::negative: unwrap batched `self`, apply
// batch_rule, re-wrap; plain call when nothing is batched at this level.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor negative_generated_plumbing(const at::Tensor & self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::negative::call(self);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// Generated in-place vmap plumbing for aten::negative_: batch_rule mutates the
// unwrapped value; returns `self`.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor & negative__generated_plumbing(at::Tensor & self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::negative_::call(self);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  batch_rule(self_value, self_bdim);
  return self;
}
// Generated vmap plumbing for aten::repeat: unwrap batched `self`, forward
// `repeats` untouched to batch_rule, re-wrap the result.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor repeat_generated_plumbing(const at::Tensor & self, c10::SymIntArrayRef repeats) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::repeat::call(self, repeats);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, repeats);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// Generated vmap plumbing for aten::repeat_interleave.Tensor: here the batched
// tensor argument is `repeats` (not `self`); unwrap it, apply batch_rule, re-wrap.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor repeat_interleave_Tensor_generated_plumbing(const at::Tensor & repeats, ::std::optional<c10::SymInt> output_size) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(repeats, cur_level)) {
    return at::_ops::repeat_interleave_Tensor::call(repeats, output_size);
  }
  auto [repeats_value, repeats_bdim] = unwrapTensorAtLevel(repeats, cur_level);
  auto results = batch_rule(repeats_value, repeats_bdim, output_size);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// Generated vmap plumbing for aten::repeat_interleave.self_Tensor: two tensor
// inputs; falls through only when NEITHER is batched at the current level,
// otherwise unwraps both (a non-batched one yields a null bdim) and dispatches.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor repeat_interleave_self_Tensor_generated_plumbing(const at::Tensor & self, const at::Tensor & repeats, ::std::optional<int64_t> dim, ::std::optional<c10::SymInt> output_size) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(repeats, cur_level)) {
    return at::_ops::repeat_interleave_self_Tensor::call(self, repeats, dim, output_size);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto [repeats_value, repeats_bdim] = unwrapTensorAtLevel(repeats, cur_level);
  auto results = batch_rule(self_value, self_bdim, repeats_value, repeats_bdim, dim, output_size);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// Generated vmap plumbing for aten::repeat_interleave.self_int: unwrap batched
// `self`, forward scalar args untouched, re-wrap the result.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor repeat_interleave_self_int_generated_plumbing(const at::Tensor & self, c10::SymInt repeats, ::std::optional<int64_t> dim, ::std::optional<c10::SymInt> output_size) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::repeat_interleave_self_int::call(self, repeats, dim, output_size);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, repeats, dim, output_size);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// Generated vmap plumbing for aten::reshape: unwrap batched `self`, forward
// `shape` untouched to batch_rule, re-wrap the result.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor reshape_generated_plumbing(const at::Tensor & self, c10::SymIntArrayRef shape) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::reshape::call(self, shape);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, shape);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// Generated vmap plumbing for aten::_reshape_copy: unwrap batched `self`,
// forward `size` untouched to batch_rule, re-wrap the result.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor _reshape_copy_generated_plumbing(const at::Tensor & self, c10::SymIntArrayRef size) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::_reshape_copy::call(self, size);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, size);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// Generated vmap plumbing for aten::_reshape_alias: unwrap batched `self`,
// forward `size`/`stride` untouched to batch_rule, re-wrap the result.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor _reshape_alias_generated_plumbing(const at::Tensor & self, c10::SymIntArrayRef size, c10::SymIntArrayRef stride) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::_reshape_alias::call(self, size, stride);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, size, stride);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// Generated vmap plumbing for aten::_mkldnn_reshape: unwrap batched `self`,
// forward `shape` untouched to batch_rule, re-wrap the result.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor _mkldnn_reshape_generated_plumbing(const at::Tensor & self, at::IntArrayRef shape) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::_mkldnn_reshape::call(self, shape);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, shape);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// Generated vmap plumbing for aten::reshape_as: two tensor inputs; falls
// through only when neither is batched at the current level, otherwise both
// are unwrapped (a non-batched one yields a null bdim) before dispatch.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor reshape_as_generated_plumbing(const at::Tensor & self, const at::Tensor & other) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
    return at::_ops::reshape_as::call(self, other);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto [other_value, other_bdim] = unwrapTensorAtLevel(other, cur_level);
  auto results = batch_rule(self_value, self_bdim, other_value, other_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// Generated vmap plumbing for aten::round: unwrap batched `self`, apply
// batch_rule, re-wrap; plain call when nothing is batched at this level.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor round_generated_plumbing(const at::Tensor & self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::round::call(self);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// Generated in-place vmap plumbing for aten::round_: batch_rule mutates the
// unwrapped value; returns `self`.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor & round__generated_plumbing(at::Tensor & self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::round_::call(self);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  batch_rule(self_value, self_bdim);
  return self;
}
// Generated vmap plumbing for aten::round.decimals: unwrap batched `self`,
// forward `decimals` untouched to batch_rule, re-wrap the result.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor round_decimals_generated_plumbing(const at::Tensor & self, int64_t decimals) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::round_decimals::call(self, decimals);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, decimals);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// Generated in-place vmap plumbing for aten::round_.decimals: batch_rule
// mutates the unwrapped value; returns `self`.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor & round__decimals_generated_plumbing(at::Tensor & self, int64_t decimals) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::round__decimals::call(self, decimals);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  batch_rule(self_value, self_bdim, decimals);
  return self;
}
// Generated vmap plumbing for aten::rrelu: unwrap batched `self`, forward the
// scalar/generator args untouched to batch_rule, re-wrap the result.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor rrelu_generated_plumbing(const at::Tensor & self, const at::Scalar & lower, const at::Scalar & upper, bool training, ::std::optional<at::Generator> generator) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::rrelu::call(self, lower, upper, training, generator);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, lower, upper, training, generator);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// Generated in-place vmap plumbing for aten::rrelu_: batch_rule mutates the
// unwrapped value; returns `self`.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor & rrelu__generated_plumbing(at::Tensor & self, const at::Scalar & lower, const at::Scalar & upper, bool training, ::std::optional<at::Generator> generator) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::rrelu_::call(self, lower, upper, training, generator);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  batch_rule(self_value, self_bdim, lower, upper, training, generator);
  return self;
}
// Generated vmap plumbing for aten::relu: unwrap batched `self`, apply
// batch_rule, re-wrap; plain call when nothing is batched at this level.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor relu_generated_plumbing(const at::Tensor & self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::relu::call(self);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// Generated in-place vmap plumbing for aten::relu_: batch_rule mutates the
// unwrapped value; returns `self`.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor & relu__generated_plumbing(at::Tensor & self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::relu_::call(self);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  batch_rule(self_value, self_bdim);
  return self;
}
// Generated vmap plumbing for aten::relu6: unwrap batched `self`, apply
// batch_rule, re-wrap; plain call when nothing is batched at this level.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor relu6_generated_plumbing(const at::Tensor & self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::relu6::call(self);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// Generated in-place vmap plumbing for aten::relu6_: batch_rule mutates the
// unwrapped value; returns `self`.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor & relu6__generated_plumbing(at::Tensor & self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::relu6_::call(self);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  batch_rule(self_value, self_bdim);
  return self;
}
// Generated vmap plumbing for aten::prelu: two tensor inputs (`self`, `weight`);
// falls through only when neither is batched at the current level.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor prelu_generated_plumbing(const at::Tensor & self, const at::Tensor & weight) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(weight, cur_level)) {
    return at::_ops::prelu::call(self, weight);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto [weight_value, weight_bdim] = unwrapTensorAtLevel(weight, cur_level);
  auto results = batch_rule(self_value, self_bdim, weight_value, weight_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// Generated vmap plumbing for aten::_prelu_kernel: two tensor inputs; falls
// through only when neither is batched at the current level.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor _prelu_kernel_generated_plumbing(const at::Tensor & self, const at::Tensor & weight) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(weight, cur_level)) {
    return at::_ops::_prelu_kernel::call(self, weight);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto [weight_value, weight_bdim] = unwrapTensorAtLevel(weight, cur_level);
  auto results = batch_rule(self_value, self_bdim, weight_value, weight_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// Generated vmap plumbing for aten::_prelu_kernel_backward: three tensor
// inputs and a pair result; batch_rule returns (value, bdim) pairs at tuple
// slots 0/1 and 2/3, each re-wrapped into a batched tensor.
template <typename batch_rule_t, batch_rule_t batch_rule>
::std::tuple<at::Tensor,at::Tensor> _prelu_kernel_backward_generated_plumbing(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & weight) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(weight, cur_level)) {
    return at::_ops::_prelu_kernel_backward::call(grad_output, self, weight);
  }
  auto [grad_output_value, grad_output_bdim] = unwrapTensorAtLevel(grad_output, cur_level);
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto [weight_value, weight_bdim] = unwrapTensorAtLevel(weight, cur_level);
  auto results = batch_rule(grad_output_value, grad_output_bdim, self_value, self_bdim, weight_value, weight_bdim);
  return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
}
// Generated in-place vmap plumbing for aten::gelu_: batch_rule mutates the
// unwrapped value (`approximate` forwarded untouched); returns `self`.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor & gelu__generated_plumbing(at::Tensor & self, c10::string_view approximate) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::gelu_::call(self, approximate);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  batch_rule(self_value, self_bdim, approximate);
  return self;
}
// Generated vmap plumbing for aten::gelu: unwrap batched `self`, forward
// `approximate` untouched to batch_rule, re-wrap the result.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor gelu_generated_plumbing(const at::Tensor & self, c10::string_view approximate) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::gelu::call(self, approximate);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, approximate);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// Generated vmap plumbing for aten::gelu_backward: two tensor inputs; falls
// through only when neither is batched at the current level.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor gelu_backward_generated_plumbing(const at::Tensor & grad_output, const at::Tensor & self, c10::string_view approximate) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(self, cur_level)) {
    return at::_ops::gelu_backward::call(grad_output, self, approximate);
  }
  auto [grad_output_value, grad_output_bdim] = unwrapTensorAtLevel(grad_output, cur_level);
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(grad_output_value, grad_output_bdim, self_value, self_bdim, approximate);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// Generated vmap plumbing for aten::infinitely_differentiable_gelu_backward:
// two tensor inputs; falls through only when neither is batched at this level.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor infinitely_differentiable_gelu_backward_generated_plumbing(const at::Tensor & grad, const at::Tensor & self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(grad, cur_level) && !isBatchedAtLevel(self, cur_level)) {
    return at::_ops::infinitely_differentiable_gelu_backward::call(grad, self);
  }
  auto [grad_value, grad_bdim] = unwrapTensorAtLevel(grad, cur_level);
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(grad_value, grad_bdim, self_value, self_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// Generated vmap plumbing for aten::hardshrink: unwrap batched `self`, forward
// `lambd` untouched to batch_rule, re-wrap the result.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor hardshrink_generated_plumbing(const at::Tensor & self, const at::Scalar & lambd) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::hardshrink::call(self, lambd);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, lambd);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// Generated vmap plumbing for aten::hardshrink_backward: two tensor inputs;
// falls through only when neither is batched at the current level.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor hardshrink_backward_generated_plumbing(const at::Tensor & grad_out, const at::Tensor & self, const at::Scalar & lambd) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(grad_out, cur_level) && !isBatchedAtLevel(self, cur_level)) {
    return at::_ops::hardshrink_backward::call(grad_out, self, lambd);
  }
  auto [grad_out_value, grad_out_bdim] = unwrapTensorAtLevel(grad_out, cur_level);
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(grad_out_value, grad_out_bdim, self_value, self_bdim, lambd);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// Generated vmap plumbing for aten::rsqrt: unwrap batched `self`, apply
// batch_rule, re-wrap; plain call when nothing is batched at this level.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor rsqrt_generated_plumbing(const at::Tensor & self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::rsqrt::call(self);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// Generated in-place vmap plumbing for aten::rsqrt_: batch_rule mutates the
// unwrapped value; returns `self`.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor & rsqrt__generated_plumbing(at::Tensor & self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::rsqrt_::call(self);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  batch_rule(self_value, self_bdim);
  return self;
}
// Generated vmap plumbing for aten::select.Dimname: unwrap batched `self`,
// forward `dim`/`index` untouched to batch_rule, re-wrap the result.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor select_Dimname_generated_plumbing(const at::Tensor & self, at::Dimname dim, int64_t index) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::select_Dimname::call(self, dim, index);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, dim, index);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// Generated vmap plumbing for aten::select.int: unwrap batched `self`, forward
// `dim`/`index` untouched to batch_rule, re-wrap the result.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor select_int_generated_plumbing(const at::Tensor & self, int64_t dim, c10::SymInt index) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::select_int::call(self, dim, index);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, dim, index);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// Generated vmap plumbing for aten::select_backward: the only tensor input is
// `grad_output`; unwrap it, forward the rest untouched, re-wrap the result.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor select_backward_generated_plumbing(const at::Tensor & grad_output, c10::SymIntArrayRef input_sizes, int64_t dim, c10::SymInt index) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(grad_output, cur_level)) {
    return at::_ops::select_backward::call(grad_output, input_sizes, dim, index);
  }
  auto [grad_output_value, grad_output_bdim] = unwrapTensorAtLevel(grad_output, cur_level);
  auto results = batch_rule(grad_output_value, grad_output_bdim, input_sizes, dim, index);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// Generated vmap plumbing for aten::_nested_select_backward: two tensor inputs;
// falls through only when neither is batched at the current level.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor _nested_select_backward_generated_plumbing(const at::Tensor & grad_output, const at::Tensor & self, int64_t dim, c10::SymInt index) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(self, cur_level)) {
    return at::_ops::_nested_select_backward::call(grad_output, self, dim, index);
  }
  auto [grad_output_value, grad_output_bdim] = unwrapTensorAtLevel(grad_output, cur_level);
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(grad_output_value, grad_output_bdim, self_value, self_bdim, dim, index);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// Generated vmap plumbing for aten::selu: unwrap batched `self`, apply
// batch_rule, re-wrap; plain call when nothing is batched at this level.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor selu_generated_plumbing(const at::Tensor & self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::selu::call(self);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// vmap plumbing for in-place aten::selu_: the batch rule mutates the unwrapped
// value directly, so `self` (which still wraps that value) is returned as-is
// with no re-wrapping step.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor & selu__generated_plumbing(at::Tensor & self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::selu_::call(self);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  batch_rule(self_value, self_bdim);
  return self;
}
// vmap plumbing for aten::celu: unwrap `self` if batched at the current
// functorch level, pass it plus the scalar `alpha` to `batch_rule`, and
// re-wrap the result; otherwise fall through to the unbatched operator.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor celu_generated_plumbing(const at::Tensor & self, const at::Scalar & alpha) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::celu::call(self, alpha);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, alpha);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// vmap plumbing for in-place aten::celu_: the batch rule mutates the unwrapped
// value directly, so `self` is returned as-is with no re-wrapping step.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor & celu__generated_plumbing(at::Tensor & self, const at::Scalar & alpha) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::celu_::call(self, alpha);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  batch_rule(self_value, self_bdim, alpha);
  return self;
}
// vmap plumbing for aten::silu: unwrap `self` if batched at the current
// functorch level, apply `batch_rule`, and re-wrap the result; otherwise
// fall through to the unbatched operator.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor silu_generated_plumbing(const at::Tensor & self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::silu::call(self);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// vmap plumbing for in-place aten::silu_: the batch rule mutates the unwrapped
// value directly, so `self` is returned as-is with no re-wrapping step.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor & silu__generated_plumbing(at::Tensor & self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::silu_::call(self);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  batch_rule(self_value, self_bdim);
  return self;
}
// vmap plumbing for aten::silu_backward: if either tensor argument is batched
// at the current functorch level, unwrap both, run `batch_rule`, and re-wrap
// the result; otherwise fall through to the unbatched operator.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor silu_backward_generated_plumbing(const at::Tensor & grad_output, const at::Tensor & self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(self, cur_level)) {
    return at::_ops::silu_backward::call(grad_output, self);
  }
  auto [grad_output_value, grad_output_bdim] = unwrapTensorAtLevel(grad_output, cur_level);
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(grad_output_value, grad_output_bdim, self_value, self_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// vmap plumbing for aten::mish: unwrap `self` if batched at the current
// functorch level, apply `batch_rule`, and re-wrap the result; otherwise
// fall through to the unbatched operator.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor mish_generated_plumbing(const at::Tensor & self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::mish::call(self);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// vmap plumbing for in-place aten::mish_: the batch rule mutates the unwrapped
// value directly, so `self` is returned as-is with no re-wrapping step.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor & mish__generated_plumbing(at::Tensor & self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::mish_::call(self);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  batch_rule(self_value, self_bdim);
  return self;
}
// vmap plumbing for aten::mish_backward: if either tensor argument is batched
// at the current functorch level, unwrap both, run `batch_rule`, and re-wrap
// the result; otherwise fall through to the unbatched operator.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor mish_backward_generated_plumbing(const at::Tensor & grad_output, const at::Tensor & self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(self, cur_level)) {
    return at::_ops::mish_backward::call(grad_output, self);
  }
  auto [grad_output_value, grad_output_bdim] = unwrapTensorAtLevel(grad_output, cur_level);
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(grad_output_value, grad_output_bdim, self_value, self_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// vmap plumbing for aten::sigmoid: unwrap `self` if batched at the current
// functorch level, apply `batch_rule`, and re-wrap the result; otherwise
// fall through to the unbatched operator.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor sigmoid_generated_plumbing(const at::Tensor & self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::sigmoid::call(self);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// vmap plumbing for in-place aten::sigmoid_: the batch rule mutates the
// unwrapped value directly, so `self` is returned as-is with no re-wrapping.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor & sigmoid__generated_plumbing(at::Tensor & self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::sigmoid_::call(self);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  batch_rule(self_value, self_bdim);
  return self;
}
// vmap plumbing for aten::logit: unwrap `self` if batched at the current
// functorch level, forward the optional `eps` to `batch_rule`, and re-wrap
// the result; otherwise fall through to the unbatched operator.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor logit_generated_plumbing(const at::Tensor & self, ::std::optional<double> eps) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::logit::call(self, eps);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, eps);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// vmap plumbing for in-place aten::logit_: the batch rule mutates the
// unwrapped value directly, so `self` is returned as-is with no re-wrapping.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor & logit__generated_plumbing(at::Tensor & self, ::std::optional<double> eps) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::logit_::call(self, eps);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  batch_rule(self_value, self_bdim, eps);
  return self;
}
// vmap plumbing for aten::sin: unwrap `self` if batched at the current
// functorch level, apply `batch_rule`, and re-wrap the result; otherwise
// fall through to the unbatched operator.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor sin_generated_plumbing(const at::Tensor & self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::sin::call(self);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// vmap plumbing for in-place aten::sin_: the batch rule mutates the unwrapped
// value directly, so `self` is returned as-is with no re-wrapping step.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor & sin__generated_plumbing(at::Tensor & self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::sin_::call(self);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  batch_rule(self_value, self_bdim);
  return self;
}
// vmap plumbing for aten::sinc: unwrap `self` if batched at the current
// functorch level, apply `batch_rule`, and re-wrap the result; otherwise
// fall through to the unbatched operator.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor sinc_generated_plumbing(const at::Tensor & self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::sinc::call(self);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// vmap plumbing for in-place aten::sinc_: the batch rule mutates the unwrapped
// value directly, so `self` is returned as-is with no re-wrapping step.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor & sinc__generated_plumbing(at::Tensor & self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::sinc_::call(self);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  batch_rule(self_value, self_bdim);
  return self;
}
// vmap plumbing for aten::sinh: unwrap `self` if batched at the current
// functorch level, apply `batch_rule`, and re-wrap the result; otherwise
// fall through to the unbatched operator.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor sinh_generated_plumbing(const at::Tensor & self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::sinh::call(self);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// vmap plumbing for in-place aten::sinh_: the batch rule mutates the unwrapped
// value directly, so `self` is returned as-is with no re-wrapping step.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor & sinh__generated_plumbing(at::Tensor & self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::sinh_::call(self);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  batch_rule(self_value, self_bdim);
  return self;
}
// vmap plumbing for aten::detach: unwrap `self` if batched at the current
// functorch level, apply `batch_rule`, and re-wrap the result; otherwise
// fall through to the unbatched operator.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor detach_generated_plumbing(const at::Tensor & self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::detach::call(self);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// vmap plumbing for aten::slice.Tensor: unwrap `self` if batched at the
// current functorch level, forward dim/start/end/step to `batch_rule`, and
// re-wrap the result; otherwise fall through to the unbatched operator.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor slice_Tensor_generated_plumbing(const at::Tensor & self, int64_t dim, ::std::optional<c10::SymInt> start, ::std::optional<c10::SymInt> end, c10::SymInt step) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::slice_Tensor::call(self, dim, start, end, step);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, dim, start, end, step);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// vmap plumbing for aten::slice_backward: only `grad_output` is a Tensor, so
// only it is checked/unwrapped; the remaining (non-tensor) arguments are
// forwarded to `batch_rule` unchanged.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor slice_backward_generated_plumbing(const at::Tensor & grad_output, c10::SymIntArrayRef input_sizes, int64_t dim, c10::SymInt start, c10::SymInt end, c10::SymInt step) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(grad_output, cur_level)) {
    return at::_ops::slice_backward::call(grad_output, input_sizes, dim, start, end, step);
  }
  auto [grad_output_value, grad_output_bdim] = unwrapTensorAtLevel(grad_output, cur_level);
  auto results = batch_rule(grad_output_value, grad_output_bdim, input_sizes, dim, start, end, step);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// vmap plumbing for aten::slice_inverse: if either `self` or `src` is batched
// at the current functorch level, unwrap both, run `batch_rule`, and re-wrap
// the result; otherwise fall through to the unbatched operator.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor slice_inverse_generated_plumbing(const at::Tensor & self, const at::Tensor & src, int64_t dim, ::std::optional<c10::SymInt> start, ::std::optional<c10::SymInt> end, c10::SymInt step) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(src, cur_level)) {
    return at::_ops::slice_inverse::call(self, src, dim, start, end, step);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto [src_value, src_bdim] = unwrapTensorAtLevel(src, cur_level);
  auto results = batch_rule(self_value, self_bdim, src_value, src_bdim, dim, start, end, step);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// vmap plumbing for aten::slice_scatter: if either `self` or `src` is batched
// at the current functorch level, unwrap both, run `batch_rule`, and re-wrap
// the result; otherwise fall through to the unbatched operator.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor slice_scatter_generated_plumbing(const at::Tensor & self, const at::Tensor & src, int64_t dim, ::std::optional<c10::SymInt> start, ::std::optional<c10::SymInt> end, c10::SymInt step) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(src, cur_level)) {
    return at::_ops::slice_scatter::call(self, src, dim, start, end, step);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto [src_value, src_bdim] = unwrapTensorAtLevel(src, cur_level);
  auto results = batch_rule(self_value, self_bdim, src_value, src_bdim, dim, start, end, step);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// vmap plumbing for aten::select_scatter: if either `self` or `src` is batched
// at the current functorch level, unwrap both, run `batch_rule`, and re-wrap
// the result; otherwise fall through to the unbatched operator.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor select_scatter_generated_plumbing(const at::Tensor & self, const at::Tensor & src, int64_t dim, c10::SymInt index) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(src, cur_level)) {
    return at::_ops::select_scatter::call(self, src, dim, index);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto [src_value, src_bdim] = unwrapTensorAtLevel(src, cur_level);
  auto results = batch_rule(self_value, self_bdim, src_value, src_bdim, dim, index);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// vmap plumbing for aten::diagonal_scatter: if either `self` or `src` is
// batched at the current functorch level, unwrap both, run `batch_rule`, and
// re-wrap the result; otherwise fall through to the unbatched operator.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor diagonal_scatter_generated_plumbing(const at::Tensor & self, const at::Tensor & src, int64_t offset, int64_t dim1, int64_t dim2) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(src, cur_level)) {
    return at::_ops::diagonal_scatter::call(self, src, offset, dim1, dim2);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto [src_value, src_bdim] = unwrapTensorAtLevel(src, cur_level);
  auto results = batch_rule(self_value, self_bdim, src_value, src_bdim, offset, dim1, dim2);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// vmap plumbing for aten::as_strided_scatter: if either `self` or `src` is
// batched at the current functorch level, unwrap both, run `batch_rule`, and
// re-wrap the result; otherwise fall through to the unbatched operator.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor as_strided_scatter_generated_plumbing(const at::Tensor & self, const at::Tensor & src, c10::SymIntArrayRef size, c10::SymIntArrayRef stride, ::std::optional<c10::SymInt> storage_offset) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(src, cur_level)) {
    return at::_ops::as_strided_scatter::call(self, src, size, stride, storage_offset);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto [src_value, src_bdim] = unwrapTensorAtLevel(src, cur_level);
  auto results = batch_rule(self_value, self_bdim, src_value, src_bdim, size, stride, storage_offset);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// vmap plumbing for aten::smm: if either `self` or `mat2` is batched at the
// current functorch level, unwrap both, run `batch_rule`, and re-wrap the
// result; otherwise fall through to the unbatched operator.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor smm_generated_plumbing(const at::Tensor & self, const at::Tensor & mat2) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(mat2, cur_level)) {
    return at::_ops::smm::call(self, mat2);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto [mat2_value, mat2_bdim] = unwrapTensorAtLevel(mat2, cur_level);
  auto results = batch_rule(self_value, self_bdim, mat2_value, mat2_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// vmap plumbing for aten::softmax.int: unwrap `self` if batched at the current
// functorch level, forward dim/dtype to `batch_rule`, and re-wrap the result;
// otherwise fall through to the unbatched operator.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor softmax_int_generated_plumbing(const at::Tensor & self, int64_t dim, ::std::optional<at::ScalarType> dtype) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::softmax_int::call(self, dim, dtype);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, dim, dtype);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// vmap plumbing for aten::softmax.Dimname: same shape as the int overload but
// with a named dimension; unwrap `self` if batched, run `batch_rule`, and
// re-wrap, otherwise fall through to the unbatched operator.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor softmax_Dimname_generated_plumbing(const at::Tensor & self, at::Dimname dim, ::std::optional<at::ScalarType> dtype) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::softmax_Dimname::call(self, dim, dtype);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, dim, dtype);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// vmap plumbing for aten::_softmax: unwrap `self` if batched at the current
// functorch level, forward dim/half_to_float to `batch_rule`, and re-wrap the
// result; otherwise fall through to the unbatched operator.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor _softmax_generated_plumbing(const at::Tensor & self, int64_t dim, bool half_to_float) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::_softmax::call(self, dim, half_to_float);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, dim, half_to_float);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// vmap plumbing for aten::_softmax_backward_data: if either `grad_output` or
// `output` is batched at the current functorch level, unwrap both, run
// `batch_rule`, and re-wrap the result; otherwise call the plain operator.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor _softmax_backward_data_generated_plumbing(const at::Tensor & grad_output, const at::Tensor & output, int64_t dim, at::ScalarType input_dtype) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(output, cur_level)) {
    return at::_ops::_softmax_backward_data::call(grad_output, output, dim, input_dtype);
  }
  auto [grad_output_value, grad_output_bdim] = unwrapTensorAtLevel(grad_output, cur_level);
  auto [output_value, output_bdim] = unwrapTensorAtLevel(output, cur_level);
  auto results = batch_rule(grad_output_value, grad_output_bdim, output_value, output_bdim, dim, input_dtype);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// vmap plumbing for aten::unsafe_split.Tensor: like the single-Tensor
// plumbing, but the op returns a list of tensors, so each output is re-wrapped
// via makeBatchedVector at the current functorch level.
template <typename batch_rule_t, batch_rule_t batch_rule>
::std::vector<at::Tensor> unsafe_split_Tensor_generated_plumbing(const at::Tensor & self, c10::SymInt split_size, int64_t dim) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::unsafe_split_Tensor::call(self, split_size, dim);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, split_size, dim);
  return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
}
// vmap plumbing for aten::split.Tensor: unwrap `self` if batched at the
// current functorch level, run `batch_rule`, and re-wrap each output tensor
// via makeBatchedVector; otherwise fall through to the unbatched operator.
template <typename batch_rule_t, batch_rule_t batch_rule>
::std::vector<at::Tensor> split_Tensor_generated_plumbing(const at::Tensor & self, c10::SymInt split_size, int64_t dim) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::split_Tensor::call(self, split_size, dim);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, split_size, dim);
  return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
}
// vmap plumbing for aten::split.sizes: unwrap `self` if batched at the current
// functorch level, run `batch_rule`, and re-wrap each output tensor via
// makeBatchedVector; otherwise fall through to the unbatched operator.
template <typename batch_rule_t, batch_rule_t batch_rule>
::std::vector<at::Tensor> split_sizes_generated_plumbing(const at::Tensor & self, c10::SymIntArrayRef split_size, int64_t dim) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::split_sizes::call(self, split_size, dim);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, split_size, dim);
  return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
}
// vmap plumbing for aten::unsafe_split_with_sizes: unwrap `self` if batched at
// the current functorch level, run `batch_rule`, and re-wrap each output via
// makeBatchedVector; otherwise fall through to the unbatched operator.
template <typename batch_rule_t, batch_rule_t batch_rule>
::std::vector<at::Tensor> unsafe_split_with_sizes_generated_plumbing(const at::Tensor & self, c10::SymIntArrayRef split_sizes, int64_t dim) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::unsafe_split_with_sizes::call(self, split_sizes, dim);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, split_sizes, dim);
  return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
}
// vmap plumbing for aten::split_with_sizes: unwrap `self` if batched at the
// current functorch level, run `batch_rule`, and re-wrap each output via
// makeBatchedVector; otherwise fall through to the unbatched operator.
template <typename batch_rule_t, batch_rule_t batch_rule>
::std::vector<at::Tensor> split_with_sizes_generated_plumbing(const at::Tensor & self, c10::SymIntArrayRef split_sizes, int64_t dim) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::split_with_sizes::call(self, split_sizes, dim);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, split_sizes, dim);
  return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
}
// vmap plumbing for aten::hsplit.int: unwrap `self` if batched at the current
// functorch level, run `batch_rule`, and re-wrap each output via
// makeBatchedVector; otherwise fall through to the unbatched operator.
template <typename batch_rule_t, batch_rule_t batch_rule>
::std::vector<at::Tensor> hsplit_int_generated_plumbing(const at::Tensor & self, int64_t sections) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::hsplit_int::call(self, sections);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, sections);
  return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
}
// vmap plumbing for aten::hsplit.array: unwrap `self` if batched at the
// current functorch level, run `batch_rule`, and re-wrap each output via
// makeBatchedVector; otherwise fall through to the unbatched operator.
template <typename batch_rule_t, batch_rule_t batch_rule>
::std::vector<at::Tensor> hsplit_array_generated_plumbing(const at::Tensor & self, at::IntArrayRef indices) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::hsplit_array::call(self, indices);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, indices);
  return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
}
// vmap plumbing for aten::vsplit.int: unwrap `self` if batched at the current
// functorch level, run `batch_rule`, and re-wrap each output via
// makeBatchedVector; otherwise fall through to the unbatched operator.
template <typename batch_rule_t, batch_rule_t batch_rule>
::std::vector<at::Tensor> vsplit_int_generated_plumbing(const at::Tensor & self, int64_t sections) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::vsplit_int::call(self, sections);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, sections);
  return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
}
// vmap plumbing for aten::vsplit.array: unwrap `self` if batched at the
// current functorch level, run `batch_rule`, and re-wrap each output via
// makeBatchedVector; otherwise fall through to the unbatched operator.
template <typename batch_rule_t, batch_rule_t batch_rule>
::std::vector<at::Tensor> vsplit_array_generated_plumbing(const at::Tensor & self, at::IntArrayRef indices) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::vsplit_array::call(self, indices);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, indices);
  return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
}
10016 template <typename batch_rule_t, batch_rule_t batch_rule>
10017 ::std::vector<at::Tensor> dsplit_int_generated_plumbing(const at::Tensor & self, int64_t sections) {
10018   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
10019   auto maybe_layer = maybeCurrentDynamicLayer();
10020   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
10021   int64_t cur_level = maybe_layer->layerId();
10022   if (!isBatchedAtLevel(self, cur_level)) {
10023     return at::_ops::dsplit_int::call(self, sections);
10024   }
10025   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
10026   auto results = batch_rule(self_value, self_bdim, sections);
10027   return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
10028 }
10029 template <typename batch_rule_t, batch_rule_t batch_rule>
10030 ::std::vector<at::Tensor> dsplit_array_generated_plumbing(const at::Tensor & self, at::IntArrayRef indices) {
10031   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
10032   auto maybe_layer = maybeCurrentDynamicLayer();
10033   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
10034   int64_t cur_level = maybe_layer->layerId();
10035   if (!isBatchedAtLevel(self, cur_level)) {
10036     return at::_ops::dsplit_array::call(self, indices);
10037   }
10038   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
10039   auto results = batch_rule(self_value, self_bdim, indices);
10040   return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
10041 }
10042 template <typename batch_rule_t, batch_rule_t batch_rule>
10043 at::Tensor squeeze_generated_plumbing(const at::Tensor & self) {
10044   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
10045   auto maybe_layer = maybeCurrentDynamicLayer();
10046   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
10047   int64_t cur_level = maybe_layer->layerId();
10048   if (!isBatchedAtLevel(self, cur_level)) {
10049     return at::_ops::squeeze::call(self);
10050   }
10051   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
10052   auto results = batch_rule(self_value, self_bdim);
10053   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
10054 }
10055 template <typename batch_rule_t, batch_rule_t batch_rule>
10056 at::Tensor squeeze_dim_generated_plumbing(const at::Tensor & self, int64_t dim) {
10057   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
10058   auto maybe_layer = maybeCurrentDynamicLayer();
10059   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
10060   int64_t cur_level = maybe_layer->layerId();
10061   if (!isBatchedAtLevel(self, cur_level)) {
10062     return at::_ops::squeeze_dim::call(self, dim);
10063   }
10064   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
10065   auto results = batch_rule(self_value, self_bdim, dim);
10066   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
10067 }
10068 template <typename batch_rule_t, batch_rule_t batch_rule>
10069 at::Tensor squeeze_dimname_generated_plumbing(const at::Tensor & self, at::Dimname dim) {
10070   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
10071   auto maybe_layer = maybeCurrentDynamicLayer();
10072   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
10073   int64_t cur_level = maybe_layer->layerId();
10074   if (!isBatchedAtLevel(self, cur_level)) {
10075     return at::_ops::squeeze_dimname::call(self, dim);
10076   }
10077   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
10078   auto results = batch_rule(self_value, self_bdim, dim);
10079   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
10080 }
10081 template <typename batch_rule_t, batch_rule_t batch_rule>
10082 at::Tensor squeeze_dims_generated_plumbing(const at::Tensor & self, at::IntArrayRef dim) {
10083   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
10084   auto maybe_layer = maybeCurrentDynamicLayer();
10085   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
10086   int64_t cur_level = maybe_layer->layerId();
10087   if (!isBatchedAtLevel(self, cur_level)) {
10088     return at::_ops::squeeze_dims::call(self, dim);
10089   }
10090   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
10091   auto results = batch_rule(self_value, self_bdim, dim);
10092   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
10093 }
10094 template <typename batch_rule_t, batch_rule_t batch_rule>
10095 at::Tensor sspaddmm_generated_plumbing(const at::Tensor & self, const at::Tensor & mat1, const at::Tensor & mat2, const at::Scalar & beta, const at::Scalar & alpha) {
10096   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
10097   auto maybe_layer = maybeCurrentDynamicLayer();
10098   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
10099   int64_t cur_level = maybe_layer->layerId();
10100   if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(mat1, cur_level) && !isBatchedAtLevel(mat2, cur_level)) {
10101     return at::_ops::sspaddmm::call(self, mat1, mat2, beta, alpha);
10102   }
10103   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
10104   auto [mat1_value, mat1_bdim] = unwrapTensorAtLevel(mat1, cur_level);
10105   auto [mat2_value, mat2_bdim] = unwrapTensorAtLevel(mat2, cur_level);
10106   auto results = batch_rule(self_value, self_bdim, mat1_value, mat1_bdim, mat2_value, mat2_bdim, beta, alpha);
10107   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
10108 }
10109 template <typename batch_rule_t, batch_rule_t batch_rule>
10110 at::Tensor _chunk_cat_generated_plumbing(at::TensorList tensors, int64_t dim, int64_t num_chunks) {
10111   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
10112   auto maybe_layer = maybeCurrentDynamicLayer();
10113   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
10114   int64_t cur_level = maybe_layer->layerId();
10115   if (!isBatchedAtLevel(tensors, cur_level)) {
10116     return at::_ops::_chunk_cat::call(tensors, dim, num_chunks);
10117   }
10118 
10119   auto results = batch_rule(tensors, dim, num_chunks);
10120   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
10121 }
10122 template <typename batch_rule_t, batch_rule_t batch_rule>
10123 at::Tensor stack_generated_plumbing(at::TensorList tensors, int64_t dim) {
10124   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
10125   auto maybe_layer = maybeCurrentDynamicLayer();
10126   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
10127   int64_t cur_level = maybe_layer->layerId();
10128   if (!isBatchedAtLevel(tensors, cur_level)) {
10129     return at::_ops::stack::call(tensors, dim);
10130   }
10131 
10132   auto results = batch_rule(tensors, dim);
10133   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
10134 }
10135 template <typename batch_rule_t, batch_rule_t batch_rule>
10136 at::Tensor _stack_generated_plumbing(at::TensorList tensors, int64_t dim) {
10137   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
10138   auto maybe_layer = maybeCurrentDynamicLayer();
10139   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
10140   int64_t cur_level = maybe_layer->layerId();
10141   if (!isBatchedAtLevel(tensors, cur_level)) {
10142     return at::_ops::_stack::call(tensors, dim);
10143   }
10144 
10145   auto results = batch_rule(tensors, dim);
10146   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
10147 }
10148 template <typename batch_rule_t, batch_rule_t batch_rule>
10149 at::Tensor hstack_generated_plumbing(at::TensorList tensors) {
10150   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
10151   auto maybe_layer = maybeCurrentDynamicLayer();
10152   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
10153   int64_t cur_level = maybe_layer->layerId();
10154   if (!isBatchedAtLevel(tensors, cur_level)) {
10155     return at::_ops::hstack::call(tensors);
10156   }
10157 
10158   auto results = batch_rule(tensors);
10159   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
10160 }
10161 template <typename batch_rule_t, batch_rule_t batch_rule>
10162 at::Tensor vstack_generated_plumbing(at::TensorList tensors) {
10163   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
10164   auto maybe_layer = maybeCurrentDynamicLayer();
10165   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
10166   int64_t cur_level = maybe_layer->layerId();
10167   if (!isBatchedAtLevel(tensors, cur_level)) {
10168     return at::_ops::vstack::call(tensors);
10169   }
10170 
10171   auto results = batch_rule(tensors);
10172   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
10173 }
10174 template <typename batch_rule_t, batch_rule_t batch_rule>
10175 at::Tensor dstack_generated_plumbing(at::TensorList tensors) {
10176   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
10177   auto maybe_layer = maybeCurrentDynamicLayer();
10178   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
10179   int64_t cur_level = maybe_layer->layerId();
10180   if (!isBatchedAtLevel(tensors, cur_level)) {
10181     return at::_ops::dstack::call(tensors);
10182   }
10183 
10184   auto results = batch_rule(tensors);
10185   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
10186 }
10187 template <typename batch_rule_t, batch_rule_t batch_rule>
10188 at::Tensor stft_generated_plumbing(const at::Tensor & self, int64_t n_fft, ::std::optional<int64_t> hop_length, ::std::optional<int64_t> win_length, const ::std::optional<at::Tensor> & window, bool normalized, ::std::optional<bool> onesided, ::std::optional<bool> return_complex, ::std::optional<bool> align_to_window) {
10189   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
10190   auto maybe_layer = maybeCurrentDynamicLayer();
10191   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
10192   int64_t cur_level = maybe_layer->layerId();
10193   if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(window, cur_level)) {
10194     return at::_ops::stft::call(self, n_fft, hop_length, win_length, window, normalized, onesided, return_complex, align_to_window);
10195   }
10196   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
10197   std::optional<Tensor> window_value;
10198   std::optional<int64_t> window_bdim;
10199   if (window) {
10200       std::tie(window_value, window_bdim) = unwrapTensorAtLevel(window.value(), cur_level);
10201   }
10202   auto results = batch_rule(self_value, self_bdim, n_fft, hop_length, win_length, window_value, window_bdim, normalized, onesided, return_complex, align_to_window);
10203   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
10204 }
10205 template <typename batch_rule_t, batch_rule_t batch_rule>
10206 at::Tensor stft_center_generated_plumbing(const at::Tensor & self, int64_t n_fft, ::std::optional<int64_t> hop_length, ::std::optional<int64_t> win_length, const ::std::optional<at::Tensor> & window, bool center, c10::string_view pad_mode, bool normalized, ::std::optional<bool> onesided, ::std::optional<bool> return_complex, ::std::optional<bool> align_to_window) {
10207   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
10208   auto maybe_layer = maybeCurrentDynamicLayer();
10209   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
10210   int64_t cur_level = maybe_layer->layerId();
10211   if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(window, cur_level)) {
10212     return at::_ops::stft_center::call(self, n_fft, hop_length, win_length, window, center, pad_mode, normalized, onesided, return_complex, align_to_window);
10213   }
10214   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
10215   std::optional<Tensor> window_value;
10216   std::optional<int64_t> window_bdim;
10217   if (window) {
10218       std::tie(window_value, window_bdim) = unwrapTensorAtLevel(window.value(), cur_level);
10219   }
10220   auto results = batch_rule(self_value, self_bdim, n_fft, hop_length, win_length, window_value, window_bdim, center, pad_mode, normalized, onesided, return_complex, align_to_window);
10221   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
10222 }
10223 template <typename batch_rule_t, batch_rule_t batch_rule>
10224 at::Tensor istft_generated_plumbing(const at::Tensor & self, int64_t n_fft, ::std::optional<int64_t> hop_length, ::std::optional<int64_t> win_length, const ::std::optional<at::Tensor> & window, bool center, bool normalized, ::std::optional<bool> onesided, ::std::optional<int64_t> length, bool return_complex) {
10225   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
10226   auto maybe_layer = maybeCurrentDynamicLayer();
10227   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
10228   int64_t cur_level = maybe_layer->layerId();
10229   if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(window, cur_level)) {
10230     return at::_ops::istft::call(self, n_fft, hop_length, win_length, window, center, normalized, onesided, length, return_complex);
10231   }
10232   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
10233   std::optional<Tensor> window_value;
10234   std::optional<int64_t> window_bdim;
10235   if (window) {
10236       std::tie(window_value, window_bdim) = unwrapTensorAtLevel(window.value(), cur_level);
10237   }
10238   auto results = batch_rule(self_value, self_bdim, n_fft, hop_length, win_length, window_value, window_bdim, center, normalized, onesided, length, return_complex);
10239   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
10240 }
10241 template <typename batch_rule_t, batch_rule_t batch_rule>
10242 at::Tensor sum_generated_plumbing(const at::Tensor & self, ::std::optional<at::ScalarType> dtype) {
10243   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
10244   auto maybe_layer = maybeCurrentDynamicLayer();
10245   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
10246   int64_t cur_level = maybe_layer->layerId();
10247   if (!isBatchedAtLevel(self, cur_level)) {
10248     return at::_ops::sum::call(self, dtype);
10249   }
10250   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
10251   auto results = batch_rule(self_value, self_bdim, dtype);
10252   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
10253 }
10254 template <typename batch_rule_t, batch_rule_t batch_rule>
10255 at::Tensor sum_dim_IntList_generated_plumbing(const at::Tensor & self, at::OptionalIntArrayRef dim, bool keepdim, ::std::optional<at::ScalarType> dtype) {
10256   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
10257   auto maybe_layer = maybeCurrentDynamicLayer();
10258   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
10259   int64_t cur_level = maybe_layer->layerId();
10260   if (!isBatchedAtLevel(self, cur_level)) {
10261     return at::_ops::sum_dim_IntList::call(self, dim, keepdim, dtype);
10262   }
10263   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
10264   auto results = batch_rule(self_value, self_bdim, dim, keepdim, dtype);
10265   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
10266 }
10267 template <typename batch_rule_t, batch_rule_t batch_rule>
10268 at::Tensor sum_dim_DimnameList_generated_plumbing(const at::Tensor & self, at::DimnameList dim, bool keepdim, ::std::optional<at::ScalarType> dtype) {
10269   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
10270   auto maybe_layer = maybeCurrentDynamicLayer();
10271   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
10272   int64_t cur_level = maybe_layer->layerId();
10273   if (!isBatchedAtLevel(self, cur_level)) {
10274     return at::_ops::sum_dim_DimnameList::call(self, dim, keepdim, dtype);
10275   }
10276   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
10277   auto results = batch_rule(self_value, self_bdim, dim, keepdim, dtype);
10278   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
10279 }
10280 template <typename batch_rule_t, batch_rule_t batch_rule>
10281 at::Tensor _nested_sum_backward_generated_plumbing(const at::Tensor & grad, const at::Tensor & self, at::OptionalIntArrayRef dim, bool keepdim) {
10282   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
10283   auto maybe_layer = maybeCurrentDynamicLayer();
10284   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
10285   int64_t cur_level = maybe_layer->layerId();
10286   if (!isBatchedAtLevel(grad, cur_level) && !isBatchedAtLevel(self, cur_level)) {
10287     return at::_ops::_nested_sum_backward::call(grad, self, dim, keepdim);
10288   }
10289   auto [grad_value, grad_bdim] = unwrapTensorAtLevel(grad, cur_level);
10290   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
10291   auto results = batch_rule(grad_value, grad_bdim, self_value, self_bdim, dim, keepdim);
10292   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
10293 }
10294 template <typename batch_rule_t, batch_rule_t batch_rule>
10295 at::Tensor nansum_generated_plumbing(const at::Tensor & self, at::OptionalIntArrayRef dim, bool keepdim, ::std::optional<at::ScalarType> dtype) {
10296   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
10297   auto maybe_layer = maybeCurrentDynamicLayer();
10298   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
10299   int64_t cur_level = maybe_layer->layerId();
10300   if (!isBatchedAtLevel(self, cur_level)) {
10301     return at::_ops::nansum::call(self, dim, keepdim, dtype);
10302   }
10303   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
10304   auto results = batch_rule(self_value, self_bdim, dim, keepdim, dtype);
10305   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
10306 }
10307 template <typename batch_rule_t, batch_rule_t batch_rule>
10308 at::Tensor sum_to_size_generated_plumbing(const at::Tensor & self, c10::SymIntArrayRef size) {
10309   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
10310   auto maybe_layer = maybeCurrentDynamicLayer();
10311   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
10312   int64_t cur_level = maybe_layer->layerId();
10313   if (!isBatchedAtLevel(self, cur_level)) {
10314     return at::_ops::sum_to_size::call(self, size);
10315   }
10316   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
10317   auto results = batch_rule(self_value, self_bdim, size);
10318   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
10319 }
10320 template <typename batch_rule_t, batch_rule_t batch_rule>
10321 at::Tensor sqrt_generated_plumbing(const at::Tensor & self) {
10322   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
10323   auto maybe_layer = maybeCurrentDynamicLayer();
10324   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
10325   int64_t cur_level = maybe_layer->layerId();
10326   if (!isBatchedAtLevel(self, cur_level)) {
10327     return at::_ops::sqrt::call(self);
10328   }
10329   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
10330   auto results = batch_rule(self_value, self_bdim);
10331   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
10332 }
10333 template <typename batch_rule_t, batch_rule_t batch_rule>
10334 at::Tensor & sqrt__generated_plumbing(at::Tensor & self) {
10335   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
10336   auto maybe_layer = maybeCurrentDynamicLayer();
10337   vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
10338   int64_t cur_level = maybe_layer->layerId();
10339   if (!isBatchedAtLevel(self, cur_level)) {
10340     return at::_ops::sqrt_::call(self);
10341   }
10342   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
10343   batch_rule(self_value, self_bdim);
10344   return self;
10345 }
10346 template <typename batch_rule_t, batch_rule_t batch_rule>
10347 at::Tensor square_generated_plumbing(const at::Tensor & self) {
10348   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
10349   auto maybe_layer = maybeCurrentDynamicLayer();
10350   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
10351   int64_t cur_level = maybe_layer->layerId();
10352   if (!isBatchedAtLevel(self, cur_level)) {
10353     return at::_ops::square::call(self);
10354   }
10355   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
10356   auto results = batch_rule(self_value, self_bdim);
10357   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
10358 }
10359 template <typename batch_rule_t, batch_rule_t batch_rule>
10360 at::Tensor & square__generated_plumbing(at::Tensor & self) {
10361   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
10362   auto maybe_layer = maybeCurrentDynamicLayer();
10363   vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
10364   int64_t cur_level = maybe_layer->layerId();
10365   if (!isBatchedAtLevel(self, cur_level)) {
10366     return at::_ops::square_::call(self);
10367   }
10368   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
10369   batch_rule(self_value, self_bdim);
10370   return self;
10371 }
10372 template <typename batch_rule_t, batch_rule_t batch_rule>
10373 at::Tensor std_generated_plumbing(const at::Tensor & self, bool unbiased) {
10374   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
10375   auto maybe_layer = maybeCurrentDynamicLayer();
10376   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
10377   int64_t cur_level = maybe_layer->layerId();
10378   if (!isBatchedAtLevel(self, cur_level)) {
10379     return at::_ops::std::call(self, unbiased);
10380   }
10381   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
10382   auto results = batch_rule(self_value, self_bdim, unbiased);
10383   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
10384 }
10385 template <typename batch_rule_t, batch_rule_t batch_rule>
10386 at::Tensor std_dim_generated_plumbing(const at::Tensor & self, at::OptionalIntArrayRef dim, bool unbiased, bool keepdim) {
10387   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
10388   auto maybe_layer = maybeCurrentDynamicLayer();
10389   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
10390   int64_t cur_level = maybe_layer->layerId();
10391   if (!isBatchedAtLevel(self, cur_level)) {
10392     return at::_ops::std_dim::call(self, dim, unbiased, keepdim);
10393   }
10394   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
10395   auto results = batch_rule(self_value, self_bdim, dim, unbiased, keepdim);
10396   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
10397 }
10398 template <typename batch_rule_t, batch_rule_t batch_rule>
10399 at::Tensor std_correction_generated_plumbing(const at::Tensor & self, at::OptionalIntArrayRef dim, const ::std::optional<at::Scalar> & correction, bool keepdim) {
10400   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
10401   auto maybe_layer = maybeCurrentDynamicLayer();
10402   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
10403   int64_t cur_level = maybe_layer->layerId();
10404   if (!isBatchedAtLevel(self, cur_level)) {
10405     return at::_ops::std_correction::call(self, dim, correction, keepdim);
10406   }
10407   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
10408   auto results = batch_rule(self_value, self_bdim, dim, correction, keepdim);
10409   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
10410 }
10411 template <typename batch_rule_t, batch_rule_t batch_rule>
10412 ::std::tuple<at::Tensor,at::Tensor> std_mean_generated_plumbing(const at::Tensor & self, bool unbiased) {
10413   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
10414   auto maybe_layer = maybeCurrentDynamicLayer();
10415   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
10416   int64_t cur_level = maybe_layer->layerId();
10417   if (!isBatchedAtLevel(self, cur_level)) {
10418     return at::_ops::std_mean::call(self, unbiased);
10419   }
10420   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
10421   auto results = batch_rule(self_value, self_bdim, unbiased);
10422   return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
10423 }
10424 template <typename batch_rule_t, batch_rule_t batch_rule>
10425 ::std::tuple<at::Tensor,at::Tensor> std_mean_dim_generated_plumbing(const at::Tensor & self, at::OptionalIntArrayRef dim, bool unbiased, bool keepdim) {
10426   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
10427   auto maybe_layer = maybeCurrentDynamicLayer();
10428   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
10429   int64_t cur_level = maybe_layer->layerId();
10430   if (!isBatchedAtLevel(self, cur_level)) {
10431     return at::_ops::std_mean_dim::call(self, dim, unbiased, keepdim);
10432   }
10433   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
10434   auto results = batch_rule(self_value, self_bdim, dim, unbiased, keepdim);
10435   return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
10436 }
10437 template <typename batch_rule_t, batch_rule_t batch_rule>
10438 ::std::tuple<at::Tensor,at::Tensor> std_mean_correction_generated_plumbing(const at::Tensor & self, at::OptionalIntArrayRef dim, const ::std::optional<at::Scalar> & correction, bool keepdim) {
10439   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
10440   auto maybe_layer = maybeCurrentDynamicLayer();
10441   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
10442   int64_t cur_level = maybe_layer->layerId();
10443   if (!isBatchedAtLevel(self, cur_level)) {
10444     return at::_ops::std_mean_correction::call(self, dim, correction, keepdim);
10445   }
10446   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
10447   auto results = batch_rule(self_value, self_bdim, dim, correction, keepdim);
10448   return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
10449 }
10450 template <typename batch_rule_t, batch_rule_t batch_rule>
10451 ::std::tuple<at::Tensor,at::Tensor> std_mean_names_dim_generated_plumbing(const at::Tensor & self, at::DimnameList dim, bool unbiased, bool keepdim) {
10452   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
10453   auto maybe_layer = maybeCurrentDynamicLayer();
10454   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
10455   int64_t cur_level = maybe_layer->layerId();
10456   if (!isBatchedAtLevel(self, cur_level)) {
10457     return at::_ops::std_mean_names_dim::call(self, dim, unbiased, keepdim);
10458   }
10459   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
10460   auto results = batch_rule(self_value, self_bdim, dim, unbiased, keepdim);
10461   return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
10462 }
10463 template <typename batch_rule_t, batch_rule_t batch_rule>
10464 ::std::tuple<at::Tensor,at::Tensor> std_mean_correction_names_generated_plumbing(const at::Tensor & self, at::DimnameList dim, const ::std::optional<at::Scalar> & correction, bool keepdim) {
10465   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
10466   auto maybe_layer = maybeCurrentDynamicLayer();
10467   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
10468   int64_t cur_level = maybe_layer->layerId();
10469   if (!isBatchedAtLevel(self, cur_level)) {
10470     return at::_ops::std_mean_correction_names::call(self, dim, correction, keepdim);
10471   }
10472   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
10473   auto results = batch_rule(self_value, self_bdim, dim, correction, keepdim);
10474   return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
10475 }
10476 template <typename batch_rule_t, batch_rule_t batch_rule>
10477 at::Tensor std_names_dim_generated_plumbing(const at::Tensor & self, at::DimnameList dim, bool unbiased, bool keepdim) {
10478   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
10479   auto maybe_layer = maybeCurrentDynamicLayer();
10480   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
10481   int64_t cur_level = maybe_layer->layerId();
10482   if (!isBatchedAtLevel(self, cur_level)) {
10483     return at::_ops::std_names_dim::call(self, dim, unbiased, keepdim);
10484   }
10485   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
10486   auto results = batch_rule(self_value, self_bdim, dim, unbiased, keepdim);
10487   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
10488 }
10489 template <typename batch_rule_t, batch_rule_t batch_rule>
10490 at::Tensor std_correction_names_generated_plumbing(const at::Tensor & self, at::DimnameList dim, const ::std::optional<at::Scalar> & correction, bool keepdim) {
10491   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
10492   auto maybe_layer = maybeCurrentDynamicLayer();
10493   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
10494   int64_t cur_level = maybe_layer->layerId();
10495   if (!isBatchedAtLevel(self, cur_level)) {
10496     return at::_ops::std_correction_names::call(self, dim, correction, keepdim);
10497   }
10498   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
10499   auto results = batch_rule(self_value, self_bdim, dim, correction, keepdim);
10500   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
10501 }
10502 template <typename batch_rule_t, batch_rule_t batch_rule>
10503 at::Tensor prod_generated_plumbing(const at::Tensor & self, ::std::optional<at::ScalarType> dtype) {
10504   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
10505   auto maybe_layer = maybeCurrentDynamicLayer();
10506   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
10507   int64_t cur_level = maybe_layer->layerId();
10508   if (!isBatchedAtLevel(self, cur_level)) {
10509     return at::_ops::prod::call(self, dtype);
10510   }
10511   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
10512   auto results = batch_rule(self_value, self_bdim, dtype);
10513   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
10514 }
// Generated vmap plumbing for aten::prod.dim_int: unwrap `self` at the current
// vmap level, dispatch to batch_rule, re-wrap (plain op when unbatched).
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor prod_dim_int_generated_plumbing(const at::Tensor & self, int64_t dim, bool keepdim, ::std::optional<at::ScalarType> dtype) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::prod_dim_int::call(self, dim, keepdim, dtype);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, dim, keepdim, dtype);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// Generated vmap plumbing for aten::prod.dim_Dimname: unwrap `self` at the
// current vmap level, dispatch to batch_rule, re-wrap (plain op when unbatched).
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor prod_dim_Dimname_generated_plumbing(const at::Tensor & self, at::Dimname dim, bool keepdim, ::std::optional<at::ScalarType> dtype) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::prod_dim_Dimname::call(self, dim, keepdim, dtype);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, dim, keepdim, dtype);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// Generated vmap plumbing for aten::t: unwrap `self` at the current vmap
// level, dispatch to batch_rule, re-wrap (plain op when unbatched).
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor t_generated_plumbing(const at::Tensor & self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::t::call(self);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// Generated vmap plumbing for aten::tan: unwrap `self` at the current vmap
// level, dispatch to batch_rule, re-wrap (plain op when unbatched).
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor tan_generated_plumbing(const at::Tensor & self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::tan::call(self);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// Generated in-place vmap plumbing for aten::tan_: the batch_rule mutates the
// unwrapped value, so no re-wrapping is needed — `self` itself is returned.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor & tan__generated_plumbing(at::Tensor & self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::tan_::call(self);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  batch_rule(self_value, self_bdim);
  return self;
}
// Generated vmap plumbing for aten::tanh: unwrap `self` at the current vmap
// level, dispatch to batch_rule, re-wrap (plain op when unbatched).
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor tanh_generated_plumbing(const at::Tensor & self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::tanh::call(self);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// Generated in-place vmap plumbing for aten::tanh_: the batch_rule mutates the
// unwrapped value, so no re-wrapping is needed — `self` itself is returned.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor & tanh__generated_plumbing(at::Tensor & self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::tanh_::call(self);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  batch_rule(self_value, self_bdim);
  return self;
}
// Generated vmap plumbing for aten::tensordot: falls through when neither
// tensor is batched at this level; otherwise unwraps both, dispatches to
// batch_rule, and re-wraps the result.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor tensordot_generated_plumbing(const at::Tensor & self, const at::Tensor & other, at::IntArrayRef dims_self, at::IntArrayRef dims_other) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
    return at::_ops::tensordot::call(self, other, dims_self, dims_other);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto [other_value, other_bdim] = unwrapTensorAtLevel(other, cur_level);
  auto results = batch_rule(self_value, self_bdim, other_value, other_bdim, dims_self, dims_other);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// Generated vmap plumbing for aten::threshold: unwrap `self` at the current
// vmap level, dispatch to batch_rule, re-wrap (plain op when unbatched).
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor threshold_generated_plumbing(const at::Tensor & self, const at::Scalar & threshold, const at::Scalar & value) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::threshold::call(self, threshold, value);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, threshold, value);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// Generated in-place vmap plumbing for aten::threshold_: the batch_rule mutates
// the unwrapped value, so no re-wrapping is needed — `self` itself is returned.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor & threshold__generated_plumbing(at::Tensor & self, const at::Scalar & threshold, const at::Scalar & value) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::threshold_::call(self, threshold, value);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  batch_rule(self_value, self_bdim, threshold, value);
  return self;
}
// Generated vmap plumbing for aten::threshold_backward: falls through when
// neither tensor is batched at this level; otherwise unwraps both, dispatches
// to batch_rule, and re-wraps the result.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor threshold_backward_generated_plumbing(const at::Tensor & grad_output, const at::Tensor & self, const at::Scalar & threshold) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(self, cur_level)) {
    return at::_ops::threshold_backward::call(grad_output, self, threshold);
  }
  auto [grad_output_value, grad_output_bdim] = unwrapTensorAtLevel(grad_output, cur_level);
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(grad_output_value, grad_output_bdim, self_value, self_bdim, threshold);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// Generated vmap plumbing for aten::tile: unwrap `self` at the current vmap
// level, dispatch to batch_rule, re-wrap (plain op when unbatched).
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor tile_generated_plumbing(const at::Tensor & self, c10::SymIntArrayRef dims) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::tile::call(self, dims);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, dims);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// Generated vmap plumbing for aten::transpose.int: unwrap `self` at the current
// vmap level, dispatch to batch_rule, re-wrap (plain op when unbatched).
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor transpose_int_generated_plumbing(const at::Tensor & self, int64_t dim0, int64_t dim1) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::transpose_int::call(self, dim0, dim1);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, dim0, dim1);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// Generated vmap plumbing for aten::transpose.Dimname: unwrap `self` at the
// current vmap level, dispatch to batch_rule, re-wrap (plain op when unbatched).
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor transpose_Dimname_generated_plumbing(const at::Tensor & self, at::Dimname dim0, at::Dimname dim1) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::transpose_Dimname::call(self, dim0, dim1);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, dim0, dim1);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// Generated vmap plumbing for aten::_mkldnn_transpose: unwrap `self` at the
// current vmap level, dispatch to batch_rule, re-wrap (plain op when unbatched).
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor _mkldnn_transpose_generated_plumbing(const at::Tensor & self, int64_t dim0, int64_t dim1) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::_mkldnn_transpose::call(self, dim0, dim1);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, dim0, dim1);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// Generated in-place vmap plumbing for aten::_mkldnn_transpose_: the batch_rule
// mutates the unwrapped value, so no re-wrapping is needed — `self` is returned.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor & _mkldnn_transpose__generated_plumbing(at::Tensor & self, int64_t dim0, int64_t dim1) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::_mkldnn_transpose_::call(self, dim0, dim1);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  batch_rule(self_value, self_bdim, dim0, dim1);
  return self;
}
// Generated vmap plumbing for aten::one_hot: unwrap `self` at the current vmap
// level, dispatch to batch_rule, re-wrap (plain op when unbatched).
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor one_hot_generated_plumbing(const at::Tensor & self, int64_t num_classes) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::one_hot::call(self, num_classes);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, num_classes);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// Generated vmap plumbing for aten::flip: unwrap `self` at the current vmap
// level, dispatch to batch_rule, re-wrap (plain op when unbatched).
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor flip_generated_plumbing(const at::Tensor & self, at::IntArrayRef dims) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::flip::call(self, dims);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, dims);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// Generated vmap plumbing for aten::fliplr: unwrap `self` at the current vmap
// level, dispatch to batch_rule, re-wrap (plain op when unbatched).
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor fliplr_generated_plumbing(const at::Tensor & self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::fliplr::call(self);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// Generated vmap plumbing for aten::flipud: unwrap `self` at the current vmap
// level, dispatch to batch_rule, re-wrap (plain op when unbatched).
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor flipud_generated_plumbing(const at::Tensor & self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::flipud::call(self);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// Generated vmap plumbing for aten::roll: unwrap `self` at the current vmap
// level, dispatch to batch_rule, re-wrap (plain op when unbatched).
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor roll_generated_plumbing(const at::Tensor & self, c10::SymIntArrayRef shifts, at::IntArrayRef dims) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::roll::call(self, shifts, dims);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, shifts, dims);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// Generated vmap plumbing for aten::rot90: unwrap `self` at the current vmap
// level, dispatch to batch_rule, re-wrap (plain op when unbatched).
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor rot90_generated_plumbing(const at::Tensor & self, int64_t k, at::IntArrayRef dims) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::rot90::call(self, k, dims);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, k, dims);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// Generated vmap plumbing for aten::trapezoid.x: falls through when neither
// `y` nor `x` is batched at this level; otherwise unwraps both, dispatches to
// batch_rule, and re-wraps the result.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor trapezoid_x_generated_plumbing(const at::Tensor & y, const at::Tensor & x, int64_t dim) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(y, cur_level) && !isBatchedAtLevel(x, cur_level)) {
    return at::_ops::trapezoid_x::call(y, x, dim);
  }
  auto [y_value, y_bdim] = unwrapTensorAtLevel(y, cur_level);
  auto [x_value, x_bdim] = unwrapTensorAtLevel(x, cur_level);
  auto results = batch_rule(y_value, y_bdim, x_value, x_bdim, dim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// Generated vmap plumbing for aten::trapezoid.dx: unwrap `y` at the current
// vmap level, dispatch to batch_rule, re-wrap (plain op when unbatched).
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor trapezoid_dx_generated_plumbing(const at::Tensor & y, const at::Scalar & dx, int64_t dim) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(y, cur_level)) {
    return at::_ops::trapezoid_dx::call(y, dx, dim);
  }
  auto [y_value, y_bdim] = unwrapTensorAtLevel(y, cur_level);
  auto results = batch_rule(y_value, y_bdim, dx, dim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// Generated vmap plumbing for aten::trapz.x: falls through when neither `y`
// nor `x` is batched at this level; otherwise unwraps both, dispatches to
// batch_rule, and re-wraps the result.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor trapz_x_generated_plumbing(const at::Tensor & y, const at::Tensor & x, int64_t dim) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(y, cur_level) && !isBatchedAtLevel(x, cur_level)) {
    return at::_ops::trapz_x::call(y, x, dim);
  }
  auto [y_value, y_bdim] = unwrapTensorAtLevel(y, cur_level);
  auto [x_value, x_bdim] = unwrapTensorAtLevel(x, cur_level);
  auto results = batch_rule(y_value, y_bdim, x_value, x_bdim, dim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// Generated vmap plumbing for aten::trapz.dx: unwrap `y` at the current vmap
// level, dispatch to batch_rule, re-wrap (plain op when unbatched).
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor trapz_dx_generated_plumbing(const at::Tensor & y, double dx, int64_t dim) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(y, cur_level)) {
    return at::_ops::trapz_dx::call(y, dx, dim);
  }
  auto [y_value, y_bdim] = unwrapTensorAtLevel(y, cur_level);
  auto results = batch_rule(y_value, y_bdim, dx, dim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// Generated vmap plumbing for aten::_transform_bias_rescale_qkv. The batch_rule
// returns three (value, bdim) pairs; each is re-wrapped as a batched tensor and
// packed into the returned 3-tuple. Falls through when neither input is batched.
template <typename batch_rule_t, batch_rule_t batch_rule>
::std::tuple<at::Tensor,at::Tensor,at::Tensor> _transform_bias_rescale_qkv_generated_plumbing(const at::Tensor & qkv, const at::Tensor & qkv_bias, int64_t num_heads) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(qkv, cur_level) && !isBatchedAtLevel(qkv_bias, cur_level)) {
    return at::_ops::_transform_bias_rescale_qkv::call(qkv, qkv_bias, num_heads);
  }
  auto [qkv_value, qkv_bdim] = unwrapTensorAtLevel(qkv, cur_level);
  auto [qkv_bias_value, qkv_bias_bdim] = unwrapTensorAtLevel(qkv_bias, cur_level);
  auto results = batch_rule(qkv_value, qkv_bdim, qkv_bias_value, qkv_bias_bdim, num_heads);
  return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level));
}
// Generated vmap plumbing for aten::_nested_tensor_from_mask: falls through
// when neither `t` nor `mask` is batched at this level; otherwise unwraps both,
// dispatches to batch_rule, and re-wraps the result.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor _nested_tensor_from_mask_generated_plumbing(const at::Tensor & t, const at::Tensor & mask, bool mask_check) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(t, cur_level) && !isBatchedAtLevel(mask, cur_level)) {
    return at::_ops::_nested_tensor_from_mask::call(t, mask, mask_check);
  }
  auto [t_value, t_bdim] = unwrapTensorAtLevel(t, cur_level);
  auto [mask_value, mask_bdim] = unwrapTensorAtLevel(mask, cur_level);
  auto results = batch_rule(t_value, t_bdim, mask_value, mask_bdim, mask_check);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// Generated vmap plumbing for aten::_nested_from_padded: falls through when
// neither input is batched at this level; otherwise unwraps both, dispatches
// to batch_rule, and re-wraps the result.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor _nested_from_padded_generated_plumbing(const at::Tensor & padded, const at::Tensor & cpu_nested_shape_example, bool fuse_transform_0213) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(padded, cur_level) && !isBatchedAtLevel(cpu_nested_shape_example, cur_level)) {
    return at::_ops::_nested_from_padded::call(padded, cpu_nested_shape_example, fuse_transform_0213);
  }
  auto [padded_value, padded_bdim] = unwrapTensorAtLevel(padded, cur_level);
  auto [cpu_nested_shape_example_value, cpu_nested_shape_example_bdim] = unwrapTensorAtLevel(cpu_nested_shape_example, cur_level);
  auto results = batch_rule(padded_value, padded_bdim, cpu_nested_shape_example_value, cpu_nested_shape_example_bdim, fuse_transform_0213);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// Generated vmap plumbing for aten::_nested_tensor_size: unwrap `self` at the
// current vmap level, dispatch to batch_rule, re-wrap (plain op when unbatched).
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor _nested_tensor_size_generated_plumbing(const at::Tensor & self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::_nested_tensor_size::call(self);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// Generated vmap plumbing for aten::_nested_tensor_strides: unwrap `self` at the
// current vmap level, dispatch to batch_rule, re-wrap (plain op when unbatched).
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor _nested_tensor_strides_generated_plumbing(const at::Tensor & self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::_nested_tensor_strides::call(self);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// Generated vmap plumbing for aten::_nested_tensor_storage_offsets: unwrap
// `self`, dispatch to batch_rule, re-wrap (plain op when unbatched).
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor _nested_tensor_storage_offsets_generated_plumbing(const at::Tensor & self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::_nested_tensor_storage_offsets::call(self);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// Generated vmap plumbing for aten::_nested_from_padded_and_nested_example:
// falls through when neither input is batched at this level; otherwise unwraps
// both, dispatches to batch_rule, and re-wraps the result.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor _nested_from_padded_and_nested_example_generated_plumbing(const at::Tensor & padded, const at::Tensor & nt_example) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(padded, cur_level) && !isBatchedAtLevel(nt_example, cur_level)) {
    return at::_ops::_nested_from_padded_and_nested_example::call(padded, nt_example);
  }
  auto [padded_value, padded_bdim] = unwrapTensorAtLevel(padded, cur_level);
  auto [nt_example_value, nt_example_bdim] = unwrapTensorAtLevel(nt_example, cur_level);
  auto results = batch_rule(padded_value, padded_bdim, nt_example_value, nt_example_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// Generated vmap plumbing for aten::_nested_view_from_buffer: falls through
// when none of the four tensors is batched at this level; otherwise unwraps
// each, dispatches to batch_rule, and re-wraps the result.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor _nested_view_from_buffer_generated_plumbing(const at::Tensor & self, const at::Tensor & nested_size, const at::Tensor & nested_strides, const at::Tensor & offsets) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(nested_size, cur_level) && !isBatchedAtLevel(nested_strides, cur_level) && !isBatchedAtLevel(offsets, cur_level)) {
    return at::_ops::_nested_view_from_buffer::call(self, nested_size, nested_strides, offsets);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto [nested_size_value, nested_size_bdim] = unwrapTensorAtLevel(nested_size, cur_level);
  auto [nested_strides_value, nested_strides_bdim] = unwrapTensorAtLevel(nested_strides, cur_level);
  auto [offsets_value, offsets_bdim] = unwrapTensorAtLevel(offsets, cur_level);
  auto results = batch_rule(self_value, self_bdim, nested_size_value, nested_size_bdim, nested_strides_value, nested_strides_bdim, offsets_value, offsets_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// Generated vmap plumbing for aten::_nested_view_from_buffer_copy: falls
// through when none of the four tensors is batched at this level; otherwise
// unwraps each, dispatches to batch_rule, and re-wraps the result.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor _nested_view_from_buffer_copy_generated_plumbing(const at::Tensor & self, const at::Tensor & nested_size, const at::Tensor & nested_strides, const at::Tensor & offsets) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(nested_size, cur_level) && !isBatchedAtLevel(nested_strides, cur_level) && !isBatchedAtLevel(offsets, cur_level)) {
    return at::_ops::_nested_view_from_buffer_copy::call(self, nested_size, nested_strides, offsets);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto [nested_size_value, nested_size_bdim] = unwrapTensorAtLevel(nested_size, cur_level);
  auto [nested_strides_value, nested_strides_bdim] = unwrapTensorAtLevel(nested_strides, cur_level);
  auto [offsets_value, offsets_bdim] = unwrapTensorAtLevel(offsets, cur_level);
  auto results = batch_rule(self_value, self_bdim, nested_size_value, nested_size_bdim, nested_strides_value, nested_strides_bdim, offsets_value, offsets_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// Generated vmap plumbing for aten::_nested_view_from_jagged. Required tensors
// are unwrapped directly; each optional tensor (`lengths`, `min_seqlen`,
// `max_seqlen`) is unwrapped only when present, its value/bdim otherwise left
// as empty optionals for the batch_rule. Falls through when nothing is batched.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor _nested_view_from_jagged_generated_plumbing(const at::Tensor & self, const at::Tensor & offsets, const at::Tensor & dummy, const ::std::optional<at::Tensor> & lengths, int64_t ragged_idx, const ::std::optional<at::Tensor> & min_seqlen, const ::std::optional<at::Tensor> & max_seqlen) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(offsets, cur_level) && !isBatchedAtLevel(dummy, cur_level) && !isBatchedAtLevel(lengths, cur_level) && !isBatchedAtLevel(min_seqlen, cur_level) && !isBatchedAtLevel(max_seqlen, cur_level)) {
    return at::_ops::_nested_view_from_jagged::call(self, offsets, dummy, lengths, ragged_idx, min_seqlen, max_seqlen);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto [offsets_value, offsets_bdim] = unwrapTensorAtLevel(offsets, cur_level);
  auto [dummy_value, dummy_bdim] = unwrapTensorAtLevel(dummy, cur_level);
  std::optional<Tensor> lengths_value;
  std::optional<int64_t> lengths_bdim;
  if (lengths) {
      std::tie(lengths_value, lengths_bdim) = unwrapTensorAtLevel(lengths.value(), cur_level);
  }
  std::optional<Tensor> min_seqlen_value;
  std::optional<int64_t> min_seqlen_bdim;
  if (min_seqlen) {
      std::tie(min_seqlen_value, min_seqlen_bdim) = unwrapTensorAtLevel(min_seqlen.value(), cur_level);
  }
  std::optional<Tensor> max_seqlen_value;
  std::optional<int64_t> max_seqlen_bdim;
  if (max_seqlen) {
      std::tie(max_seqlen_value, max_seqlen_bdim) = unwrapTensorAtLevel(max_seqlen.value(), cur_level);
  }
  auto results = batch_rule(self_value, self_bdim, offsets_value, offsets_bdim, dummy_value, dummy_bdim, lengths_value, lengths_bdim, ragged_idx, min_seqlen_value, min_seqlen_bdim, max_seqlen_value, max_seqlen_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// Generated vmap plumbing for aten::_nested_view_from_jagged_copy. Required
// tensors are unwrapped directly; each optional tensor (`lengths`,
// `min_seqlen`, `max_seqlen`) is unwrapped only when present, its value/bdim
// otherwise left as empty optionals. Falls through when nothing is batched.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor _nested_view_from_jagged_copy_generated_plumbing(const at::Tensor & self, const at::Tensor & offsets, const at::Tensor & dummy, const ::std::optional<at::Tensor> & lengths, int64_t ragged_idx, const ::std::optional<at::Tensor> & min_seqlen, const ::std::optional<at::Tensor> & max_seqlen) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(offsets, cur_level) && !isBatchedAtLevel(dummy, cur_level) && !isBatchedAtLevel(lengths, cur_level) && !isBatchedAtLevel(min_seqlen, cur_level) && !isBatchedAtLevel(max_seqlen, cur_level)) {
    return at::_ops::_nested_view_from_jagged_copy::call(self, offsets, dummy, lengths, ragged_idx, min_seqlen, max_seqlen);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto [offsets_value, offsets_bdim] = unwrapTensorAtLevel(offsets, cur_level);
  auto [dummy_value, dummy_bdim] = unwrapTensorAtLevel(dummy, cur_level);
  std::optional<Tensor> lengths_value;
  std::optional<int64_t> lengths_bdim;
  if (lengths) {
      std::tie(lengths_value, lengths_bdim) = unwrapTensorAtLevel(lengths.value(), cur_level);
  }
  std::optional<Tensor> min_seqlen_value;
  std::optional<int64_t> min_seqlen_bdim;
  if (min_seqlen) {
      std::tie(min_seqlen_value, min_seqlen_bdim) = unwrapTensorAtLevel(min_seqlen.value(), cur_level);
  }
  std::optional<Tensor> max_seqlen_value;
  std::optional<int64_t> max_seqlen_bdim;
  if (max_seqlen) {
      std::tie(max_seqlen_value, max_seqlen_bdim) = unwrapTensorAtLevel(max_seqlen.value(), cur_level);
  }
  auto results = batch_rule(self_value, self_bdim, offsets_value, offsets_bdim, dummy_value, dummy_bdim, lengths_value, lengths_bdim, ragged_idx, min_seqlen_value, min_seqlen_bdim, max_seqlen_value, max_seqlen_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
11044 template <typename batch_rule_t, batch_rule_t batch_rule>
11045 at::Tensor _nested_get_values_generated_plumbing(const at::Tensor & self) {
11046   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
11047   auto maybe_layer = maybeCurrentDynamicLayer();
11048   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
11049   int64_t cur_level = maybe_layer->layerId();
11050   if (!isBatchedAtLevel(self, cur_level)) {
11051     return at::_ops::_nested_get_values::call(self);
11052   }
11053   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
11054   auto results = batch_rule(self_value, self_bdim);
11055   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
11056 }
11057 template <typename batch_rule_t, batch_rule_t batch_rule>
11058 at::Tensor _nested_get_values_copy_generated_plumbing(const at::Tensor & self) {
11059   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
11060   auto maybe_layer = maybeCurrentDynamicLayer();
11061   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
11062   int64_t cur_level = maybe_layer->layerId();
11063   if (!isBatchedAtLevel(self, cur_level)) {
11064     return at::_ops::_nested_get_values_copy::call(self);
11065   }
11066   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
11067   auto results = batch_rule(self_value, self_bdim);
11068   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
11069 }
11070 template <typename batch_rule_t, batch_rule_t batch_rule>
11071 at::Tensor _nested_get_offsets_generated_plumbing(const at::Tensor & self) {
11072   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
11073   auto maybe_layer = maybeCurrentDynamicLayer();
11074   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
11075   int64_t cur_level = maybe_layer->layerId();
11076   if (!isBatchedAtLevel(self, cur_level)) {
11077     return at::_ops::_nested_get_offsets::call(self);
11078   }
11079   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
11080   auto results = batch_rule(self_value, self_bdim);
11081   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
11082 }
11083 template <typename batch_rule_t, batch_rule_t batch_rule>
11084 at::Tensor _nested_get_lengths_generated_plumbing(const at::Tensor & self) {
11085   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
11086   auto maybe_layer = maybeCurrentDynamicLayer();
11087   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
11088   int64_t cur_level = maybe_layer->layerId();
11089   if (!isBatchedAtLevel(self, cur_level)) {
11090     return at::_ops::_nested_get_lengths::call(self);
11091   }
11092   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
11093   auto results = batch_rule(self_value, self_bdim);
11094   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
11095 }
11096 template <typename batch_rule_t, batch_rule_t batch_rule>
11097 at::Tensor _nested_get_min_seqlen_generated_plumbing(const at::Tensor & self) {
11098   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
11099   auto maybe_layer = maybeCurrentDynamicLayer();
11100   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
11101   int64_t cur_level = maybe_layer->layerId();
11102   if (!isBatchedAtLevel(self, cur_level)) {
11103     return at::_ops::_nested_get_min_seqlen::call(self);
11104   }
11105   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
11106   auto results = batch_rule(self_value, self_bdim);
11107   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
11108 }
11109 template <typename batch_rule_t, batch_rule_t batch_rule>
11110 at::Tensor _nested_get_max_seqlen_generated_plumbing(const at::Tensor & self) {
11111   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
11112   auto maybe_layer = maybeCurrentDynamicLayer();
11113   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
11114   int64_t cur_level = maybe_layer->layerId();
11115   if (!isBatchedAtLevel(self, cur_level)) {
11116     return at::_ops::_nested_get_max_seqlen::call(self);
11117   }
11118   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
11119   auto results = batch_rule(self_value, self_bdim);
11120   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
11121 }
11122 template <typename batch_rule_t, batch_rule_t batch_rule>
11123 at::Tensor _nested_get_jagged_dummy_generated_plumbing(const at::Tensor & any) {
11124   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
11125   auto maybe_layer = maybeCurrentDynamicLayer();
11126   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
11127   int64_t cur_level = maybe_layer->layerId();
11128   if (!isBatchedAtLevel(any, cur_level)) {
11129     return at::_ops::_nested_get_jagged_dummy::call(any);
11130   }
11131   auto [any_value, any_bdim] = unwrapTensorAtLevel(any, cur_level);
11132   auto results = batch_rule(any_value, any_bdim);
11133   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
11134 }
11135 template <typename batch_rule_t, batch_rule_t batch_rule>
11136 ::std::tuple<at::Tensor,at::Tensor> _nested_compute_contiguous_strides_offsets_generated_plumbing(const at::Tensor & nested_size) {
11137   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
11138   auto maybe_layer = maybeCurrentDynamicLayer();
11139   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
11140   int64_t cur_level = maybe_layer->layerId();
11141   if (!isBatchedAtLevel(nested_size, cur_level)) {
11142     return at::_ops::_nested_compute_contiguous_strides_offsets::call(nested_size);
11143   }
11144   auto [nested_size_value, nested_size_bdim] = unwrapTensorAtLevel(nested_size, cur_level);
11145   auto results = batch_rule(nested_size_value, nested_size_bdim);
11146   return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
11147 }
11148 template <typename batch_rule_t, batch_rule_t batch_rule>
11149 at::Tensor _trilinear_generated_plumbing(const at::Tensor & i1, const at::Tensor & i2, const at::Tensor & i3, at::IntArrayRef expand1, at::IntArrayRef expand2, at::IntArrayRef expand3, at::IntArrayRef sumdim, int64_t unroll_dim) {
11150   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
11151   auto maybe_layer = maybeCurrentDynamicLayer();
11152   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
11153   int64_t cur_level = maybe_layer->layerId();
11154   if (!isBatchedAtLevel(i1, cur_level) && !isBatchedAtLevel(i2, cur_level) && !isBatchedAtLevel(i3, cur_level)) {
11155     return at::_ops::_trilinear::call(i1, i2, i3, expand1, expand2, expand3, sumdim, unroll_dim);
11156   }
11157   auto [i1_value, i1_bdim] = unwrapTensorAtLevel(i1, cur_level);
11158   auto [i2_value, i2_bdim] = unwrapTensorAtLevel(i2, cur_level);
11159   auto [i3_value, i3_bdim] = unwrapTensorAtLevel(i3, cur_level);
11160   auto results = batch_rule(i1_value, i1_bdim, i2_value, i2_bdim, i3_value, i3_bdim, expand1, expand2, expand3, sumdim, unroll_dim);
11161   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
11162 }
11163 template <typename batch_rule_t, batch_rule_t batch_rule>
11164 at::Tensor triplet_margin_loss_generated_plumbing(const at::Tensor & anchor, const at::Tensor & positive, const at::Tensor & negative, double margin, double p, double eps, bool swap, int64_t reduction) {
11165   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
11166   auto maybe_layer = maybeCurrentDynamicLayer();
11167   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
11168   int64_t cur_level = maybe_layer->layerId();
11169   if (!isBatchedAtLevel(anchor, cur_level) && !isBatchedAtLevel(positive, cur_level) && !isBatchedAtLevel(negative, cur_level)) {
11170     return at::_ops::triplet_margin_loss::call(anchor, positive, negative, margin, p, eps, swap, reduction);
11171   }
11172   auto [anchor_value, anchor_bdim] = unwrapTensorAtLevel(anchor, cur_level);
11173   auto [positive_value, positive_bdim] = unwrapTensorAtLevel(positive, cur_level);
11174   auto [negative_value, negative_bdim] = unwrapTensorAtLevel(negative, cur_level);
11175   auto results = batch_rule(anchor_value, anchor_bdim, positive_value, positive_bdim, negative_value, negative_bdim, margin, p, eps, swap, reduction);
11176   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
11177 }
11178 template <typename batch_rule_t, batch_rule_t batch_rule>
11179 at::Tensor trunc_generated_plumbing(const at::Tensor & self) {
11180   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
11181   auto maybe_layer = maybeCurrentDynamicLayer();
11182   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
11183   int64_t cur_level = maybe_layer->layerId();
11184   if (!isBatchedAtLevel(self, cur_level)) {
11185     return at::_ops::trunc::call(self);
11186   }
11187   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
11188   auto results = batch_rule(self_value, self_bdim);
11189   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
11190 }
11191 template <typename batch_rule_t, batch_rule_t batch_rule>
11192 at::Tensor & trunc__generated_plumbing(at::Tensor & self) {
11193   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
11194   auto maybe_layer = maybeCurrentDynamicLayer();
11195   vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
11196   int64_t cur_level = maybe_layer->layerId();
11197   if (!isBatchedAtLevel(self, cur_level)) {
11198     return at::_ops::trunc_::call(self);
11199   }
11200   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
11201   batch_rule(self_value, self_bdim);
11202   return self;
11203 }
11204 template <typename batch_rule_t, batch_rule_t batch_rule>
11205 at::Tensor fix_generated_plumbing(const at::Tensor & self) {
11206   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
11207   auto maybe_layer = maybeCurrentDynamicLayer();
11208   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
11209   int64_t cur_level = maybe_layer->layerId();
11210   if (!isBatchedAtLevel(self, cur_level)) {
11211     return at::_ops::fix::call(self);
11212   }
11213   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
11214   auto results = batch_rule(self_value, self_bdim);
11215   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
11216 }
11217 template <typename batch_rule_t, batch_rule_t batch_rule>
11218 at::Tensor & fix__generated_plumbing(at::Tensor & self) {
11219   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
11220   auto maybe_layer = maybeCurrentDynamicLayer();
11221   vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
11222   int64_t cur_level = maybe_layer->layerId();
11223   if (!isBatchedAtLevel(self, cur_level)) {
11224     return at::_ops::fix_::call(self);
11225   }
11226   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
11227   batch_rule(self_value, self_bdim);
11228   return self;
11229 }
11230 template <typename batch_rule_t, batch_rule_t batch_rule>
11231 at::Tensor type_as_generated_plumbing(const at::Tensor & self, const at::Tensor & other) {
11232   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
11233   auto maybe_layer = maybeCurrentDynamicLayer();
11234   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
11235   int64_t cur_level = maybe_layer->layerId();
11236   if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
11237     return at::_ops::type_as::call(self, other);
11238   }
11239   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
11240   auto [other_value, other_bdim] = unwrapTensorAtLevel(other, cur_level);
11241   auto results = batch_rule(self_value, self_bdim, other_value, other_bdim);
11242   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
11243 }
11244 template <typename batch_rule_t, batch_rule_t batch_rule>
11245 ::std::tuple<at::Tensor,at::Tensor> _unique_generated_plumbing(const at::Tensor & self, bool sorted, bool return_inverse) {
11246   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
11247   auto maybe_layer = maybeCurrentDynamicLayer();
11248   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
11249   int64_t cur_level = maybe_layer->layerId();
11250   if (!isBatchedAtLevel(self, cur_level)) {
11251     return at::_ops::_unique::call(self, sorted, return_inverse);
11252   }
11253   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
11254   auto results = batch_rule(self_value, self_bdim, sorted, return_inverse);
11255   return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
11256 }
11257 template <typename batch_rule_t, batch_rule_t batch_rule>
11258 ::std::tuple<at::Tensor,at::Tensor,at::Tensor> unique_dim_generated_plumbing(const at::Tensor & self, int64_t dim, bool sorted, bool return_inverse, bool return_counts) {
11259   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
11260   auto maybe_layer = maybeCurrentDynamicLayer();
11261   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
11262   int64_t cur_level = maybe_layer->layerId();
11263   if (!isBatchedAtLevel(self, cur_level)) {
11264     return at::_ops::unique_dim::call(self, dim, sorted, return_inverse, return_counts);
11265   }
11266   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
11267   auto results = batch_rule(self_value, self_bdim, dim, sorted, return_inverse, return_counts);
11268   return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level));
11269 }
11270 template <typename batch_rule_t, batch_rule_t batch_rule>
11271 ::std::tuple<at::Tensor,at::Tensor,at::Tensor> unique_consecutive_generated_plumbing(const at::Tensor & self, bool return_inverse, bool return_counts, ::std::optional<int64_t> dim) {
11272   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
11273   auto maybe_layer = maybeCurrentDynamicLayer();
11274   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
11275   int64_t cur_level = maybe_layer->layerId();
11276   if (!isBatchedAtLevel(self, cur_level)) {
11277     return at::_ops::unique_consecutive::call(self, return_inverse, return_counts, dim);
11278   }
11279   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
11280   auto results = batch_rule(self_value, self_bdim, return_inverse, return_counts, dim);
11281   return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level));
11282 }
11283 template <typename batch_rule_t, batch_rule_t batch_rule>
11284 ::std::tuple<at::Tensor,at::Tensor,at::Tensor> unique_dim_consecutive_generated_plumbing(const at::Tensor & self, int64_t dim, bool return_inverse, bool return_counts) {
11285   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
11286   auto maybe_layer = maybeCurrentDynamicLayer();
11287   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
11288   int64_t cur_level = maybe_layer->layerId();
11289   if (!isBatchedAtLevel(self, cur_level)) {
11290     return at::_ops::unique_dim_consecutive::call(self, dim, return_inverse, return_counts);
11291   }
11292   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
11293   auto results = batch_rule(self_value, self_bdim, dim, return_inverse, return_counts);
11294   return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level));
11295 }
11296 template <typename batch_rule_t, batch_rule_t batch_rule>
11297 ::std::tuple<at::Tensor,at::Tensor,at::Tensor> _unique2_generated_plumbing(const at::Tensor & self, bool sorted, bool return_inverse, bool return_counts) {
11298   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
11299   auto maybe_layer = maybeCurrentDynamicLayer();
11300   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
11301   int64_t cur_level = maybe_layer->layerId();
11302   if (!isBatchedAtLevel(self, cur_level)) {
11303     return at::_ops::_unique2::call(self, sorted, return_inverse, return_counts);
11304   }
11305   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
11306   auto results = batch_rule(self_value, self_bdim, sorted, return_inverse, return_counts);
11307   return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level));
11308 }
11309 template <typename batch_rule_t, batch_rule_t batch_rule>
11310 at::Tensor _unsafe_view_generated_plumbing(const at::Tensor & self, c10::SymIntArrayRef size) {
11311   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
11312   auto maybe_layer = maybeCurrentDynamicLayer();
11313   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
11314   int64_t cur_level = maybe_layer->layerId();
11315   if (!isBatchedAtLevel(self, cur_level)) {
11316     return at::_ops::_unsafe_view::call(self, size);
11317   }
11318   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
11319   auto results = batch_rule(self_value, self_bdim, size);
11320   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
11321 }
11322 template <typename batch_rule_t, batch_rule_t batch_rule>
11323 at::Tensor unsqueeze_generated_plumbing(const at::Tensor & self, int64_t dim) {
11324   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
11325   auto maybe_layer = maybeCurrentDynamicLayer();
11326   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
11327   int64_t cur_level = maybe_layer->layerId();
11328   if (!isBatchedAtLevel(self, cur_level)) {
11329     return at::_ops::unsqueeze::call(self, dim);
11330   }
11331   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
11332   auto results = batch_rule(self_value, self_bdim, dim);
11333   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
11334 }
11335 template <typename batch_rule_t, batch_rule_t batch_rule>
11336 at::Tensor vander_generated_plumbing(const at::Tensor & x, ::std::optional<int64_t> N, bool increasing) {
11337   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
11338   auto maybe_layer = maybeCurrentDynamicLayer();
11339   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
11340   int64_t cur_level = maybe_layer->layerId();
11341   if (!isBatchedAtLevel(x, cur_level)) {
11342     return at::_ops::vander::call(x, N, increasing);
11343   }
11344   auto [x_value, x_bdim] = unwrapTensorAtLevel(x, cur_level);
11345   auto results = batch_rule(x_value, x_bdim, N, increasing);
11346   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
11347 }
11348 template <typename batch_rule_t, batch_rule_t batch_rule>
11349 at::Tensor var_generated_plumbing(const at::Tensor & self, bool unbiased) {
11350   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
11351   auto maybe_layer = maybeCurrentDynamicLayer();
11352   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
11353   int64_t cur_level = maybe_layer->layerId();
11354   if (!isBatchedAtLevel(self, cur_level)) {
11355     return at::_ops::var::call(self, unbiased);
11356   }
11357   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
11358   auto results = batch_rule(self_value, self_bdim, unbiased);
11359   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
11360 }
11361 template <typename batch_rule_t, batch_rule_t batch_rule>
11362 at::Tensor var_dim_generated_plumbing(const at::Tensor & self, at::OptionalIntArrayRef dim, bool unbiased, bool keepdim) {
11363   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
11364   auto maybe_layer = maybeCurrentDynamicLayer();
11365   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
11366   int64_t cur_level = maybe_layer->layerId();
11367   if (!isBatchedAtLevel(self, cur_level)) {
11368     return at::_ops::var_dim::call(self, dim, unbiased, keepdim);
11369   }
11370   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
11371   auto results = batch_rule(self_value, self_bdim, dim, unbiased, keepdim);
11372   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
11373 }
11374 template <typename batch_rule_t, batch_rule_t batch_rule>
11375 at::Tensor var_correction_generated_plumbing(const at::Tensor & self, at::OptionalIntArrayRef dim, const ::std::optional<at::Scalar> & correction, bool keepdim) {
11376   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
11377   auto maybe_layer = maybeCurrentDynamicLayer();
11378   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
11379   int64_t cur_level = maybe_layer->layerId();
11380   if (!isBatchedAtLevel(self, cur_level)) {
11381     return at::_ops::var_correction::call(self, dim, correction, keepdim);
11382   }
11383   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
11384   auto results = batch_rule(self_value, self_bdim, dim, correction, keepdim);
11385   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
11386 }
11387 template <typename batch_rule_t, batch_rule_t batch_rule>
11388 at::Tensor var_names_dim_generated_plumbing(const at::Tensor & self, at::DimnameList dim, bool unbiased, bool keepdim) {
11389   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
11390   auto maybe_layer = maybeCurrentDynamicLayer();
11391   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
11392   int64_t cur_level = maybe_layer->layerId();
11393   if (!isBatchedAtLevel(self, cur_level)) {
11394     return at::_ops::var_names_dim::call(self, dim, unbiased, keepdim);
11395   }
11396   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
11397   auto results = batch_rule(self_value, self_bdim, dim, unbiased, keepdim);
11398   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
11399 }
11400 template <typename batch_rule_t, batch_rule_t batch_rule>
11401 at::Tensor var_correction_names_generated_plumbing(const at::Tensor & self, at::DimnameList dim, const ::std::optional<at::Scalar> & correction, bool keepdim) {
11402   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
11403   auto maybe_layer = maybeCurrentDynamicLayer();
11404   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
11405   int64_t cur_level = maybe_layer->layerId();
11406   if (!isBatchedAtLevel(self, cur_level)) {
11407     return at::_ops::var_correction_names::call(self, dim, correction, keepdim);
11408   }
11409   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
11410   auto results = batch_rule(self_value, self_bdim, dim, correction, keepdim);
11411   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
11412 }
11413 template <typename batch_rule_t, batch_rule_t batch_rule>
11414 ::std::tuple<at::Tensor,at::Tensor> var_mean_generated_plumbing(const at::Tensor & self, bool unbiased) {
11415   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
11416   auto maybe_layer = maybeCurrentDynamicLayer();
11417   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
11418   int64_t cur_level = maybe_layer->layerId();
11419   if (!isBatchedAtLevel(self, cur_level)) {
11420     return at::_ops::var_mean::call(self, unbiased);
11421   }
11422   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
11423   auto results = batch_rule(self_value, self_bdim, unbiased);
11424   return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
11425 }
11426 template <typename batch_rule_t, batch_rule_t batch_rule>
11427 ::std::tuple<at::Tensor,at::Tensor> var_mean_dim_generated_plumbing(const at::Tensor & self, at::OptionalIntArrayRef dim, bool unbiased, bool keepdim) {
11428   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
11429   auto maybe_layer = maybeCurrentDynamicLayer();
11430   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
11431   int64_t cur_level = maybe_layer->layerId();
11432   if (!isBatchedAtLevel(self, cur_level)) {
11433     return at::_ops::var_mean_dim::call(self, dim, unbiased, keepdim);
11434   }
11435   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
11436   auto results = batch_rule(self_value, self_bdim, dim, unbiased, keepdim);
11437   return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
11438 }
11439 template <typename batch_rule_t, batch_rule_t batch_rule>
11440 ::std::tuple<at::Tensor,at::Tensor> var_mean_correction_generated_plumbing(const at::Tensor & self, at::OptionalIntArrayRef dim, const ::std::optional<at::Scalar> & correction, bool keepdim) {
11441   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
11442   auto maybe_layer = maybeCurrentDynamicLayer();
11443   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
11444   int64_t cur_level = maybe_layer->layerId();
11445   if (!isBatchedAtLevel(self, cur_level)) {
11446     return at::_ops::var_mean_correction::call(self, dim, correction, keepdim);
11447   }
11448   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
11449   auto results = batch_rule(self_value, self_bdim, dim, correction, keepdim);
11450   return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
11451 }
11452 template <typename batch_rule_t, batch_rule_t batch_rule>
11453 ::std::tuple<at::Tensor,at::Tensor> var_mean_names_dim_generated_plumbing(const at::Tensor & self, at::DimnameList dim, bool unbiased, bool keepdim) {
11454   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
11455   auto maybe_layer = maybeCurrentDynamicLayer();
11456   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
11457   int64_t cur_level = maybe_layer->layerId();
11458   if (!isBatchedAtLevel(self, cur_level)) {
11459     return at::_ops::var_mean_names_dim::call(self, dim, unbiased, keepdim);
11460   }
11461   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
11462   auto results = batch_rule(self_value, self_bdim, dim, unbiased, keepdim);
11463   return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
11464 }
11465 template <typename batch_rule_t, batch_rule_t batch_rule>
11466 ::std::tuple<at::Tensor,at::Tensor> var_mean_correction_names_generated_plumbing(const at::Tensor & self, at::DimnameList dim, const ::std::optional<at::Scalar> & correction, bool keepdim) {
11467   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
11468   auto maybe_layer = maybeCurrentDynamicLayer();
11469   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
11470   int64_t cur_level = maybe_layer->layerId();
11471   if (!isBatchedAtLevel(self, cur_level)) {
11472     return at::_ops::var_mean_correction_names::call(self, dim, correction, keepdim);
11473   }
11474   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
11475   auto results = batch_rule(self_value, self_bdim, dim, correction, keepdim);
11476   return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
11477 }
11478 template <typename batch_rule_t, batch_rule_t batch_rule>
11479 at::Tensor view_as_generated_plumbing(const at::Tensor & self, const at::Tensor & other) {
11480   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
11481   auto maybe_layer = maybeCurrentDynamicLayer();
11482   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
11483   int64_t cur_level = maybe_layer->layerId();
11484   if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
11485     return at::_ops::view_as::call(self, other);
11486   }
11487   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
11488   auto [other_value, other_bdim] = unwrapTensorAtLevel(other, cur_level);
11489   auto results = batch_rule(self_value, self_bdim, other_value, other_bdim);
11490   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
11491 }
11492 template <typename batch_rule_t, batch_rule_t batch_rule>
11493 at::Tensor where_self_generated_plumbing(const at::Tensor & condition, const at::Tensor & self, const at::Tensor & other) {
11494   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
11495   auto maybe_layer = maybeCurrentDynamicLayer();
11496   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
11497   int64_t cur_level = maybe_layer->layerId();
11498   if (!isBatchedAtLevel(condition, cur_level) && !isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
11499     return at::_ops::where_self::call(condition, self, other);
11500   }
11501   auto [condition_value, condition_bdim] = unwrapTensorAtLevel(condition, cur_level);
11502   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
11503   auto [other_value, other_bdim] = unwrapTensorAtLevel(other, cur_level);
11504   auto results = batch_rule(condition_value, condition_bdim, self_value, self_bdim, other_value, other_bdim);
11505   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
11506 }
11507 template <typename batch_rule_t, batch_rule_t batch_rule>
11508 at::Tensor where_ScalarSelf_generated_plumbing(const at::Tensor & condition, const at::Scalar & self, const at::Tensor & other) {
11509   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
11510   auto maybe_layer = maybeCurrentDynamicLayer();
11511   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
11512   int64_t cur_level = maybe_layer->layerId();
11513   if (!isBatchedAtLevel(condition, cur_level) && !isBatchedAtLevel(other, cur_level)) {
11514     return at::_ops::where_ScalarSelf::call(condition, self, other);
11515   }
11516   auto [condition_value, condition_bdim] = unwrapTensorAtLevel(condition, cur_level);
11517   auto [other_value, other_bdim] = unwrapTensorAtLevel(other, cur_level);
11518   auto results = batch_rule(condition_value, condition_bdim, self, other_value, other_bdim);
11519   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
11520 }
11521 template <typename batch_rule_t, batch_rule_t batch_rule>
11522 at::Tensor where_ScalarOther_generated_plumbing(const at::Tensor & condition, const at::Tensor & self, const at::Scalar & other) {
11523   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
11524   auto maybe_layer = maybeCurrentDynamicLayer();
11525   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
11526   int64_t cur_level = maybe_layer->layerId();
11527   if (!isBatchedAtLevel(condition, cur_level) && !isBatchedAtLevel(self, cur_level)) {
11528     return at::_ops::where_ScalarOther::call(condition, self, other);
11529   }
11530   auto [condition_value, condition_bdim] = unwrapTensorAtLevel(condition, cur_level);
11531   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
11532   auto results = batch_rule(condition_value, condition_bdim, self_value, self_bdim, other);
11533   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
11534 }
11535 template <typename batch_rule_t, batch_rule_t batch_rule>
11536 at::Tensor where_Scalar_generated_plumbing(const at::Tensor & condition, const at::Scalar & self, const at::Scalar & other) {
11537   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
11538   auto maybe_layer = maybeCurrentDynamicLayer();
11539   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
11540   int64_t cur_level = maybe_layer->layerId();
11541   if (!isBatchedAtLevel(condition, cur_level)) {
11542     return at::_ops::where_Scalar::call(condition, self, other);
11543   }
11544   auto [condition_value, condition_bdim] = unwrapTensorAtLevel(condition, cur_level);
11545   auto results = batch_rule(condition_value, condition_bdim, self, other);
11546   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
11547 }
11548 template <typename batch_rule_t, batch_rule_t batch_rule>
11549 ::std::vector<at::Tensor> where_generated_plumbing(const at::Tensor & condition) {
11550   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
11551   auto maybe_layer = maybeCurrentDynamicLayer();
11552   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
11553   int64_t cur_level = maybe_layer->layerId();
11554   if (!isBatchedAtLevel(condition, cur_level)) {
11555     return at::_ops::where::call(condition);
11556   }
11557   auto [condition_value, condition_bdim] = unwrapTensorAtLevel(condition, cur_level);
11558   auto results = batch_rule(condition_value, condition_bdim);
11559   return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
11560 }
11561 template <typename batch_rule_t, batch_rule_t batch_rule>
11562 at::Tensor norm_except_dim_generated_plumbing(const at::Tensor & v, int64_t pow, int64_t dim) {
11563   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
11564   auto maybe_layer = maybeCurrentDynamicLayer();
11565   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
11566   int64_t cur_level = maybe_layer->layerId();
11567   if (!isBatchedAtLevel(v, cur_level)) {
11568     return at::_ops::norm_except_dim::call(v, pow, dim);
11569   }
11570   auto [v_value, v_bdim] = unwrapTensorAtLevel(v, cur_level);
11571   auto results = batch_rule(v_value, v_bdim, pow, dim);
11572   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
11573 }
11574 template <typename batch_rule_t, batch_rule_t batch_rule>
11575 at::Tensor _weight_norm_generated_plumbing(const at::Tensor & v, const at::Tensor & g, int64_t dim) {
11576   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
11577   auto maybe_layer = maybeCurrentDynamicLayer();
11578   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
11579   int64_t cur_level = maybe_layer->layerId();
11580   if (!isBatchedAtLevel(v, cur_level) && !isBatchedAtLevel(g, cur_level)) {
11581     return at::_ops::_weight_norm::call(v, g, dim);
11582   }
11583   auto [v_value, v_bdim] = unwrapTensorAtLevel(v, cur_level);
11584   auto [g_value, g_bdim] = unwrapTensorAtLevel(g, cur_level);
11585   auto results = batch_rule(v_value, v_bdim, g_value, g_bdim, dim);
11586   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
11587 }
11588 template <typename batch_rule_t, batch_rule_t batch_rule>
11589 ::std::tuple<at::Tensor,at::Tensor> _weight_norm_interface_generated_plumbing(const at::Tensor & v, const at::Tensor & g, int64_t dim) {
11590   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
11591   auto maybe_layer = maybeCurrentDynamicLayer();
11592   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
11593   int64_t cur_level = maybe_layer->layerId();
11594   if (!isBatchedAtLevel(v, cur_level) && !isBatchedAtLevel(g, cur_level)) {
11595     return at::_ops::_weight_norm_interface::call(v, g, dim);
11596   }
11597   auto [v_value, v_bdim] = unwrapTensorAtLevel(v, cur_level);
11598   auto [g_value, g_bdim] = unwrapTensorAtLevel(g, cur_level);
11599   auto results = batch_rule(v_value, v_bdim, g_value, g_bdim, dim);
11600   return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
11601 }
11602 template <typename batch_rule_t, batch_rule_t batch_rule>
11603 ::std::tuple<at::Tensor,at::Tensor> _weight_norm_interface_backward_generated_plumbing(const at::Tensor & grad_w, const at::Tensor & saved_v, const at::Tensor & saved_g, const at::Tensor & saved_norms, int64_t dim) {
11604   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
11605   auto maybe_layer = maybeCurrentDynamicLayer();
11606   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
11607   int64_t cur_level = maybe_layer->layerId();
11608   if (!isBatchedAtLevel(grad_w, cur_level) && !isBatchedAtLevel(saved_v, cur_level) && !isBatchedAtLevel(saved_g, cur_level) && !isBatchedAtLevel(saved_norms, cur_level)) {
11609     return at::_ops::_weight_norm_interface_backward::call(grad_w, saved_v, saved_g, saved_norms, dim);
11610   }
11611   auto [grad_w_value, grad_w_bdim] = unwrapTensorAtLevel(grad_w, cur_level);
11612   auto [saved_v_value, saved_v_bdim] = unwrapTensorAtLevel(saved_v, cur_level);
11613   auto [saved_g_value, saved_g_bdim] = unwrapTensorAtLevel(saved_g, cur_level);
11614   auto [saved_norms_value, saved_norms_bdim] = unwrapTensorAtLevel(saved_norms, cur_level);
11615   auto results = batch_rule(grad_w_value, grad_w_bdim, saved_v_value, saved_v_bdim, saved_g_value, saved_g_bdim, saved_norms_value, saved_norms_bdim, dim);
11616   return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
11617 }
11618 template <typename batch_rule_t, batch_rule_t batch_rule>
11619 ::std::tuple<at::Tensor,at::Tensor> _weight_norm_differentiable_backward_generated_plumbing(const at::Tensor & grad_w, const at::Tensor & saved_v, const at::Tensor & saved_g, const at::Tensor & saved_norms, int64_t dim) {
11620   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
11621   auto maybe_layer = maybeCurrentDynamicLayer();
11622   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
11623   int64_t cur_level = maybe_layer->layerId();
11624   if (!isBatchedAtLevel(grad_w, cur_level) && !isBatchedAtLevel(saved_v, cur_level) && !isBatchedAtLevel(saved_g, cur_level) && !isBatchedAtLevel(saved_norms, cur_level)) {
11625     return at::_ops::_weight_norm_differentiable_backward::call(grad_w, saved_v, saved_g, saved_norms, dim);
11626   }
11627   auto [grad_w_value, grad_w_bdim] = unwrapTensorAtLevel(grad_w, cur_level);
11628   auto [saved_v_value, saved_v_bdim] = unwrapTensorAtLevel(saved_v, cur_level);
11629   auto [saved_g_value, saved_g_bdim] = unwrapTensorAtLevel(saved_g, cur_level);
11630   auto [saved_norms_value, saved_norms_bdim] = unwrapTensorAtLevel(saved_norms, cur_level);
11631   auto results = batch_rule(grad_w_value, grad_w_bdim, saved_v_value, saved_v_bdim, saved_g_value, saved_g_bdim, saved_norms_value, saved_norms_bdim, dim);
11632   return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
11633 }
11634 template <typename batch_rule_t, batch_rule_t batch_rule>
11635 at::Tensor zeros_like_generated_plumbing(const at::Tensor & self, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory, ::std::optional<at::MemoryFormat> memory_format) {
11636   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
11637   auto maybe_layer = maybeCurrentDynamicLayer();
11638   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
11639   int64_t cur_level = maybe_layer->layerId();
11640   if (!isBatchedAtLevel(self, cur_level)) {
11641     return at::_ops::zeros_like::call(self, dtype, layout, device, pin_memory, memory_format);
11642   }
11643   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
11644   auto results = batch_rule(self_value, self_bdim, dtype, layout, device, pin_memory, memory_format);
11645   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
11646 }
11647 template <typename batch_rule_t, batch_rule_t batch_rule>
11648 at::Tensor _standard_gamma_grad_generated_plumbing(const at::Tensor & self, const at::Tensor & output) {
11649   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
11650   auto maybe_layer = maybeCurrentDynamicLayer();
11651   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
11652   int64_t cur_level = maybe_layer->layerId();
11653   if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(output, cur_level)) {
11654     return at::_ops::_standard_gamma_grad::call(self, output);
11655   }
11656   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
11657   auto [output_value, output_bdim] = unwrapTensorAtLevel(output, cur_level);
11658   auto results = batch_rule(self_value, self_bdim, output_value, output_bdim);
11659   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
11660 }
11661 template <typename batch_rule_t, batch_rule_t batch_rule>
11662 at::Tensor _standard_gamma_generated_plumbing(const at::Tensor & self, ::std::optional<at::Generator> generator) {
11663   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
11664   auto maybe_layer = maybeCurrentDynamicLayer();
11665   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
11666   int64_t cur_level = maybe_layer->layerId();
11667   if (!isBatchedAtLevel(self, cur_level)) {
11668     return at::_ops::_standard_gamma::call(self, generator);
11669   }
11670   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
11671   auto results = batch_rule(self_value, self_bdim, generator);
11672   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
11673 }
11674 template <typename batch_rule_t, batch_rule_t batch_rule>
11675 at::Tensor _dirichlet_grad_generated_plumbing(const at::Tensor & x, const at::Tensor & alpha, const at::Tensor & total) {
11676   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
11677   auto maybe_layer = maybeCurrentDynamicLayer();
11678   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
11679   int64_t cur_level = maybe_layer->layerId();
11680   if (!isBatchedAtLevel(x, cur_level) && !isBatchedAtLevel(alpha, cur_level) && !isBatchedAtLevel(total, cur_level)) {
11681     return at::_ops::_dirichlet_grad::call(x, alpha, total);
11682   }
11683   auto [x_value, x_bdim] = unwrapTensorAtLevel(x, cur_level);
11684   auto [alpha_value, alpha_bdim] = unwrapTensorAtLevel(alpha, cur_level);
11685   auto [total_value, total_bdim] = unwrapTensorAtLevel(total, cur_level);
11686   auto results = batch_rule(x_value, x_bdim, alpha_value, alpha_bdim, total_value, total_bdim);
11687   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
11688 }
11689 template <typename batch_rule_t, batch_rule_t batch_rule>
11690 at::Tensor _sample_dirichlet_generated_plumbing(const at::Tensor & self, ::std::optional<at::Generator> generator) {
11691   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
11692   auto maybe_layer = maybeCurrentDynamicLayer();
11693   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
11694   int64_t cur_level = maybe_layer->layerId();
11695   if (!isBatchedAtLevel(self, cur_level)) {
11696     return at::_ops::_sample_dirichlet::call(self, generator);
11697   }
11698   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
11699   auto results = batch_rule(self_value, self_bdim, generator);
11700   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
11701 }
11702 template <typename batch_rule_t, batch_rule_t batch_rule>
11703 at::Tensor poisson_generated_plumbing(const at::Tensor & self, ::std::optional<at::Generator> generator) {
11704   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
11705   auto maybe_layer = maybeCurrentDynamicLayer();
11706   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
11707   int64_t cur_level = maybe_layer->layerId();
11708   if (!isBatchedAtLevel(self, cur_level)) {
11709     return at::_ops::poisson::call(self, generator);
11710   }
11711   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
11712   auto results = batch_rule(self_value, self_bdim, generator);
11713   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
11714 }
11715 template <typename batch_rule_t, batch_rule_t batch_rule>
11716 at::Tensor binomial_generated_plumbing(const at::Tensor & count, const at::Tensor & prob, ::std::optional<at::Generator> generator) {
11717   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
11718   auto maybe_layer = maybeCurrentDynamicLayer();
11719   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
11720   int64_t cur_level = maybe_layer->layerId();
11721   if (!isBatchedAtLevel(count, cur_level) && !isBatchedAtLevel(prob, cur_level)) {
11722     return at::_ops::binomial::call(count, prob, generator);
11723   }
11724   auto [count_value, count_bdim] = unwrapTensorAtLevel(count, cur_level);
11725   auto [prob_value, prob_bdim] = unwrapTensorAtLevel(prob, cur_level);
11726   auto results = batch_rule(count_value, count_bdim, prob_value, prob_bdim, generator);
11727   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
11728 }
11729 template <typename batch_rule_t, batch_rule_t batch_rule>
11730 at::Tensor native_norm_generated_plumbing(const at::Tensor & self, const at::Scalar & p) {
11731   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
11732   auto maybe_layer = maybeCurrentDynamicLayer();
11733   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
11734   int64_t cur_level = maybe_layer->layerId();
11735   if (!isBatchedAtLevel(self, cur_level)) {
11736     return at::_ops::native_norm::call(self, p);
11737   }
11738   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
11739   auto results = batch_rule(self_value, self_bdim, p);
11740   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
11741 }
11742 template <typename batch_rule_t, batch_rule_t batch_rule>
11743 at::Tensor native_norm_ScalarOpt_dim_dtype_generated_plumbing(const at::Tensor & self, const ::std::optional<at::Scalar> & p, at::IntArrayRef dim, bool keepdim, ::std::optional<at::ScalarType> dtype) {
11744   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
11745   auto maybe_layer = maybeCurrentDynamicLayer();
11746   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
11747   int64_t cur_level = maybe_layer->layerId();
11748   if (!isBatchedAtLevel(self, cur_level)) {
11749     return at::_ops::native_norm_ScalarOpt_dim_dtype::call(self, p, dim, keepdim, dtype);
11750   }
11751   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
11752   auto results = batch_rule(self_value, self_bdim, p, dim, keepdim, dtype);
11753   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
11754 }
template <typename batch_rule_t, batch_rule_t batch_rule>
// Generated vmap plumbing for aten::_batch_norm_no_update.
// Unwraps every tensor (including the optional weight/bias/running stats)
// batched at the current dynamic layer, invokes the batch rule with
// (value, bdim) pairs interleaved in declaration order, and re-wraps all four
// outputs as BatchedTensors at the same level.
::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor> _batch_norm_no_update_generated_plumbing(const at::Tensor & input, const ::std::optional<at::Tensor> & weight, const ::std::optional<at::Tensor> & bias, const ::std::optional<at::Tensor> & running_mean, const ::std::optional<at::Tensor> & running_var, double momentum, double eps) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  // Fast path: no argument carries a batch dim at this level, so dispatch to
  // the plain op without any unwrapping.
  if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(bias, cur_level) && !isBatchedAtLevel(running_mean, cur_level) && !isBatchedAtLevel(running_var, cur_level)) {
    return at::_ops::_batch_norm_no_update::call(input, weight, bias, running_mean, running_var, momentum, eps);
  }
  auto [input_value, input_bdim] = unwrapTensorAtLevel(input, cur_level);
  // Optional tensors: unwrap only when present; an absent tensor is passed to
  // the batch rule as (nullopt, nullopt).
  std::optional<Tensor> weight_value;
  std::optional<int64_t> weight_bdim;
  if (weight) {
      std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight.value(), cur_level);
  }
  std::optional<Tensor> bias_value;
  std::optional<int64_t> bias_bdim;
  if (bias) {
      std::tie(bias_value, bias_bdim) = unwrapTensorAtLevel(bias.value(), cur_level);
  }
  std::optional<Tensor> running_mean_value;
  std::optional<int64_t> running_mean_bdim;
  if (running_mean) {
      std::tie(running_mean_value, running_mean_bdim) = unwrapTensorAtLevel(running_mean.value(), cur_level);
  }
  std::optional<Tensor> running_var_value;
  std::optional<int64_t> running_var_bdim;
  if (running_var) {
      std::tie(running_var_value, running_var_bdim) = unwrapTensorAtLevel(running_var.value(), cur_level);
  }
  auto results = batch_rule(input_value, input_bdim, weight_value, weight_bdim, bias_value, bias_bdim, running_mean_value, running_mean_bdim, running_var_value, running_var_bdim, momentum, eps);
  // results is a flat tuple of (tensor, bdim) pairs; re-wrap each output.
  return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level), makeBatched(std::get<6>(results), std::get<7>(results), cur_level));
}
template <typename batch_rule_t, batch_rule_t batch_rule>
// Generated vmap plumbing for aten::batch_norm_backward.
// Required tensors (grad_out, input, weight, reserve) are unwrapped
// unconditionally; the optional saved/running statistics are unwrapped only
// when present. Note the batch rule takes reserve's (value, bdim) pair last,
// after update/eps/output_mask, matching the generated batch-rule signature.
::std::tuple<at::Tensor,at::Tensor,at::Tensor> batch_norm_backward_generated_plumbing(const at::Tensor & grad_out, const at::Tensor & input, const at::Tensor & weight, const ::std::optional<at::Tensor> & running_mean, const ::std::optional<at::Tensor> & running_var, const ::std::optional<at::Tensor> & save_mean, const ::std::optional<at::Tensor> & save_var, bool update, double eps, ::std::array<bool,3> output_mask, const at::Tensor & reserve) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  // Fast path: nothing batched at this level, dispatch to the plain op.
  if (!isBatchedAtLevel(grad_out, cur_level) && !isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(running_mean, cur_level) && !isBatchedAtLevel(running_var, cur_level) && !isBatchedAtLevel(save_mean, cur_level) && !isBatchedAtLevel(save_var, cur_level) && !isBatchedAtLevel(reserve, cur_level)) {
    return at::_ops::batch_norm_backward::call(grad_out, input, weight, running_mean, running_var, save_mean, save_var, update, eps, output_mask, reserve);
  }
  auto [grad_out_value, grad_out_bdim] = unwrapTensorAtLevel(grad_out, cur_level);
  auto [input_value, input_bdim] = unwrapTensorAtLevel(input, cur_level);
  auto [weight_value, weight_bdim] = unwrapTensorAtLevel(weight, cur_level);
  auto [reserve_value, reserve_bdim] = unwrapTensorAtLevel(reserve, cur_level);
  // Optional tensors: unwrap only when present; an absent tensor is passed to
  // the batch rule as (nullopt, nullopt).
  std::optional<Tensor> running_mean_value;
  std::optional<int64_t> running_mean_bdim;
  if (running_mean) {
      std::tie(running_mean_value, running_mean_bdim) = unwrapTensorAtLevel(running_mean.value(), cur_level);
  }
  std::optional<Tensor> running_var_value;
  std::optional<int64_t> running_var_bdim;
  if (running_var) {
      std::tie(running_var_value, running_var_bdim) = unwrapTensorAtLevel(running_var.value(), cur_level);
  }
  std::optional<Tensor> save_mean_value;
  std::optional<int64_t> save_mean_bdim;
  if (save_mean) {
      std::tie(save_mean_value, save_mean_bdim) = unwrapTensorAtLevel(save_mean.value(), cur_level);
  }
  std::optional<Tensor> save_var_value;
  std::optional<int64_t> save_var_bdim;
  if (save_var) {
      std::tie(save_var_value, save_var_bdim) = unwrapTensorAtLevel(save_var.value(), cur_level);
  }
  auto results = batch_rule(grad_out_value, grad_out_bdim, input_value, input_bdim, weight_value, weight_bdim, running_mean_value, running_mean_bdim, running_var_value, running_var_bdim, save_mean_value, save_mean_bdim, save_var_value, save_var_bdim, update, eps, output_mask, reserve_value, reserve_bdim);
  // results is a flat tuple of (tensor, bdim) pairs; re-wrap each output.
  return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level));
}
11824 template <typename batch_rule_t, batch_rule_t batch_rule>
11825 at::Tensor _sparse_sum_generated_plumbing(const at::Tensor & self) {
11826   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
11827   auto maybe_layer = maybeCurrentDynamicLayer();
11828   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
11829   int64_t cur_level = maybe_layer->layerId();
11830   if (!isBatchedAtLevel(self, cur_level)) {
11831     return at::_ops::_sparse_sum::call(self);
11832   }
11833   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
11834   auto results = batch_rule(self_value, self_bdim);
11835   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
11836 }
11837 template <typename batch_rule_t, batch_rule_t batch_rule>
11838 at::Tensor _sparse_sum_dtype_generated_plumbing(const at::Tensor & self, at::ScalarType dtype) {
11839   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
11840   auto maybe_layer = maybeCurrentDynamicLayer();
11841   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
11842   int64_t cur_level = maybe_layer->layerId();
11843   if (!isBatchedAtLevel(self, cur_level)) {
11844     return at::_ops::_sparse_sum_dtype::call(self, dtype);
11845   }
11846   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
11847   auto results = batch_rule(self_value, self_bdim, dtype);
11848   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
11849 }
11850 template <typename batch_rule_t, batch_rule_t batch_rule>
11851 at::Tensor _sparse_sum_dim_generated_plumbing(const at::Tensor & self, at::IntArrayRef dim) {
11852   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
11853   auto maybe_layer = maybeCurrentDynamicLayer();
11854   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
11855   int64_t cur_level = maybe_layer->layerId();
11856   if (!isBatchedAtLevel(self, cur_level)) {
11857     return at::_ops::_sparse_sum_dim::call(self, dim);
11858   }
11859   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
11860   auto results = batch_rule(self_value, self_bdim, dim);
11861   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
11862 }
11863 template <typename batch_rule_t, batch_rule_t batch_rule>
11864 at::Tensor _sparse_sum_dim_dtype_generated_plumbing(const at::Tensor & self, at::IntArrayRef dim, at::ScalarType dtype) {
11865   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
11866   auto maybe_layer = maybeCurrentDynamicLayer();
11867   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
11868   int64_t cur_level = maybe_layer->layerId();
11869   if (!isBatchedAtLevel(self, cur_level)) {
11870     return at::_ops::_sparse_sum_dim_dtype::call(self, dim, dtype);
11871   }
11872   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
11873   auto results = batch_rule(self_value, self_bdim, dim, dtype);
11874   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
11875 }
11876 template <typename batch_rule_t, batch_rule_t batch_rule>
11877 at::Tensor _sparse_sum_backward_generated_plumbing(const at::Tensor & grad, const at::Tensor & self, at::IntArrayRef dim) {
11878   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
11879   auto maybe_layer = maybeCurrentDynamicLayer();
11880   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
11881   int64_t cur_level = maybe_layer->layerId();
11882   if (!isBatchedAtLevel(grad, cur_level) && !isBatchedAtLevel(self, cur_level)) {
11883     return at::_ops::_sparse_sum_backward::call(grad, self, dim);
11884   }
11885   auto [grad_value, grad_bdim] = unwrapTensorAtLevel(grad, cur_level);
11886   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
11887   auto results = batch_rule(grad_value, grad_bdim, self_value, self_bdim, dim);
11888   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
11889 }
11890 template <typename batch_rule_t, batch_rule_t batch_rule>
11891 at::Tensor _sparse_csr_sum_dim_dtype_generated_plumbing(const at::Tensor & self, at::IntArrayRef dim, bool keepdim, ::std::optional<at::ScalarType> dtype) {
11892   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
11893   auto maybe_layer = maybeCurrentDynamicLayer();
11894   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
11895   int64_t cur_level = maybe_layer->layerId();
11896   if (!isBatchedAtLevel(self, cur_level)) {
11897     return at::_ops::_sparse_csr_sum_dim_dtype::call(self, dim, keepdim, dtype);
11898   }
11899   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
11900   auto results = batch_rule(self_value, self_bdim, dim, keepdim, dtype);
11901   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
11902 }
11903 template <typename batch_rule_t, batch_rule_t batch_rule>
11904 at::Tensor _sparse_csr_prod_dim_dtype_generated_plumbing(const at::Tensor & self, at::IntArrayRef dim, bool keepdim, ::std::optional<at::ScalarType> dtype) {
11905   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
11906   auto maybe_layer = maybeCurrentDynamicLayer();
11907   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
11908   int64_t cur_level = maybe_layer->layerId();
11909   if (!isBatchedAtLevel(self, cur_level)) {
11910     return at::_ops::_sparse_csr_prod_dim_dtype::call(self, dim, keepdim, dtype);
11911   }
11912   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
11913   auto results = batch_rule(self_value, self_bdim, dim, keepdim, dtype);
11914   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
11915 }
11916 template <typename batch_rule_t, batch_rule_t batch_rule>
11917 at::Tensor _sparse_softmax_int_generated_plumbing(const at::Tensor & self, int64_t dim, ::std::optional<at::ScalarType> dtype) {
11918   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
11919   auto maybe_layer = maybeCurrentDynamicLayer();
11920   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
11921   int64_t cur_level = maybe_layer->layerId();
11922   if (!isBatchedAtLevel(self, cur_level)) {
11923     return at::_ops::_sparse_softmax_int::call(self, dim, dtype);
11924   }
11925   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
11926   auto results = batch_rule(self_value, self_bdim, dim, dtype);
11927   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
11928 }
11929 template <typename batch_rule_t, batch_rule_t batch_rule>
11930 at::Tensor _sparse_softmax_Dimname_generated_plumbing(const at::Tensor & self, at::Dimname dim, ::std::optional<at::ScalarType> dtype) {
11931   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
11932   auto maybe_layer = maybeCurrentDynamicLayer();
11933   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
11934   int64_t cur_level = maybe_layer->layerId();
11935   if (!isBatchedAtLevel(self, cur_level)) {
11936     return at::_ops::_sparse_softmax_Dimname::call(self, dim, dtype);
11937   }
11938   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
11939   auto results = batch_rule(self_value, self_bdim, dim, dtype);
11940   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
11941 }
11942 template <typename batch_rule_t, batch_rule_t batch_rule>
11943 at::Tensor _sparse_softmax_generated_plumbing(const at::Tensor & self, int64_t dim, bool half_to_float) {
11944   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
11945   auto maybe_layer = maybeCurrentDynamicLayer();
11946   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
11947   int64_t cur_level = maybe_layer->layerId();
11948   if (!isBatchedAtLevel(self, cur_level)) {
11949     return at::_ops::_sparse_softmax::call(self, dim, half_to_float);
11950   }
11951   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
11952   auto results = batch_rule(self_value, self_bdim, dim, half_to_float);
11953   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
11954 }
11955 template <typename batch_rule_t, batch_rule_t batch_rule>
11956 at::Tensor _sparse_softmax_backward_data_generated_plumbing(const at::Tensor & grad_output, const at::Tensor & output, int64_t dim, const at::Tensor & self) {
11957   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
11958   auto maybe_layer = maybeCurrentDynamicLayer();
11959   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
11960   int64_t cur_level = maybe_layer->layerId();
11961   if (!isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(output, cur_level) && !isBatchedAtLevel(self, cur_level)) {
11962     return at::_ops::_sparse_softmax_backward_data::call(grad_output, output, dim, self);
11963   }
11964   auto [grad_output_value, grad_output_bdim] = unwrapTensorAtLevel(grad_output, cur_level);
11965   auto [output_value, output_bdim] = unwrapTensorAtLevel(output, cur_level);
11966   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
11967   auto results = batch_rule(grad_output_value, grad_output_bdim, output_value, output_bdim, dim, self_value, self_bdim);
11968   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
11969 }
11970 template <typename batch_rule_t, batch_rule_t batch_rule>
11971 at::Tensor _sparse_log_softmax_int_generated_plumbing(const at::Tensor & self, int64_t dim, ::std::optional<at::ScalarType> dtype) {
11972   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
11973   auto maybe_layer = maybeCurrentDynamicLayer();
11974   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
11975   int64_t cur_level = maybe_layer->layerId();
11976   if (!isBatchedAtLevel(self, cur_level)) {
11977     return at::_ops::_sparse_log_softmax_int::call(self, dim, dtype);
11978   }
11979   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
11980   auto results = batch_rule(self_value, self_bdim, dim, dtype);
11981   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
11982 }
11983 template <typename batch_rule_t, batch_rule_t batch_rule>
11984 at::Tensor _sparse_log_softmax_Dimname_generated_plumbing(const at::Tensor & self, at::Dimname dim, ::std::optional<at::ScalarType> dtype) {
11985   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
11986   auto maybe_layer = maybeCurrentDynamicLayer();
11987   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
11988   int64_t cur_level = maybe_layer->layerId();
11989   if (!isBatchedAtLevel(self, cur_level)) {
11990     return at::_ops::_sparse_log_softmax_Dimname::call(self, dim, dtype);
11991   }
11992   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
11993   auto results = batch_rule(self_value, self_bdim, dim, dtype);
11994   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
11995 }
11996 template <typename batch_rule_t, batch_rule_t batch_rule>
11997 at::Tensor _sparse_log_softmax_generated_plumbing(const at::Tensor & self, int64_t dim, bool half_to_float) {
11998   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
11999   auto maybe_layer = maybeCurrentDynamicLayer();
12000   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
12001   int64_t cur_level = maybe_layer->layerId();
12002   if (!isBatchedAtLevel(self, cur_level)) {
12003     return at::_ops::_sparse_log_softmax::call(self, dim, half_to_float);
12004   }
12005   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
12006   auto results = batch_rule(self_value, self_bdim, dim, half_to_float);
12007   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
12008 }
12009 template <typename batch_rule_t, batch_rule_t batch_rule>
12010 at::Tensor _sparse_log_softmax_backward_data_generated_plumbing(const at::Tensor & grad_output, const at::Tensor & output, int64_t dim, const at::Tensor & self) {
12011   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
12012   auto maybe_layer = maybeCurrentDynamicLayer();
12013   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
12014   int64_t cur_level = maybe_layer->layerId();
12015   if (!isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(output, cur_level) && !isBatchedAtLevel(self, cur_level)) {
12016     return at::_ops::_sparse_log_softmax_backward_data::call(grad_output, output, dim, self);
12017   }
12018   auto [grad_output_value, grad_output_bdim] = unwrapTensorAtLevel(grad_output, cur_level);
12019   auto [output_value, output_bdim] = unwrapTensorAtLevel(output, cur_level);
12020   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
12021   auto results = batch_rule(grad_output_value, grad_output_bdim, output_value, output_bdim, dim, self_value, self_bdim);
12022   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
12023 }
12024 template <typename batch_rule_t, batch_rule_t batch_rule>
12025 at::Tensor _spdiags_generated_plumbing(const at::Tensor & diagonals, const at::Tensor & offsets, at::IntArrayRef shape, ::std::optional<at::Layout> layout) {
12026   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
12027   auto maybe_layer = maybeCurrentDynamicLayer();
12028   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
12029   int64_t cur_level = maybe_layer->layerId();
12030   if (!isBatchedAtLevel(diagonals, cur_level) && !isBatchedAtLevel(offsets, cur_level)) {
12031     return at::_ops::_spdiags::call(diagonals, offsets, shape, layout);
12032   }
12033   auto [diagonals_value, diagonals_bdim] = unwrapTensorAtLevel(diagonals, cur_level);
12034   auto [offsets_value, offsets_bdim] = unwrapTensorAtLevel(offsets, cur_level);
12035   auto results = batch_rule(diagonals_value, diagonals_bdim, offsets_value, offsets_bdim, shape, layout);
12036   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
12037 }
12038 template <typename batch_rule_t, batch_rule_t batch_rule>
12039 at::Tensor norm_ScalarOpt_dtype_generated_plumbing(const at::Tensor & self, const ::std::optional<at::Scalar> & p, at::ScalarType dtype) {
12040   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
12041   auto maybe_layer = maybeCurrentDynamicLayer();
12042   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
12043   int64_t cur_level = maybe_layer->layerId();
12044   if (!isBatchedAtLevel(self, cur_level)) {
12045     return at::_ops::norm_ScalarOpt_dtype::call(self, p, dtype);
12046   }
12047   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
12048   auto results = batch_rule(self_value, self_bdim, p, dtype);
12049   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
12050 }
12051 template <typename batch_rule_t, batch_rule_t batch_rule>
12052 at::Tensor norm_Scalar_generated_plumbing(const at::Tensor & self, const at::Scalar & p) {
12053   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
12054   auto maybe_layer = maybeCurrentDynamicLayer();
12055   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
12056   int64_t cur_level = maybe_layer->layerId();
12057   if (!isBatchedAtLevel(self, cur_level)) {
12058     return at::_ops::norm_Scalar::call(self, p);
12059   }
12060   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
12061   auto results = batch_rule(self_value, self_bdim, p);
12062   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
12063 }
12064 template <typename batch_rule_t, batch_rule_t batch_rule>
12065 at::Tensor norm_ScalarOpt_dim_dtype_generated_plumbing(const at::Tensor & self, const ::std::optional<at::Scalar> & p, at::IntArrayRef dim, bool keepdim, at::ScalarType dtype) {
12066   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
12067   auto maybe_layer = maybeCurrentDynamicLayer();
12068   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
12069   int64_t cur_level = maybe_layer->layerId();
12070   if (!isBatchedAtLevel(self, cur_level)) {
12071     return at::_ops::norm_ScalarOpt_dim_dtype::call(self, p, dim, keepdim, dtype);
12072   }
12073   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
12074   auto results = batch_rule(self_value, self_bdim, p, dim, keepdim, dtype);
12075   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
12076 }
12077 template <typename batch_rule_t, batch_rule_t batch_rule>
12078 at::Tensor norm_ScalarOpt_dim_generated_plumbing(const at::Tensor & self, const ::std::optional<at::Scalar> & p, at::IntArrayRef dim, bool keepdim) {
12079   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
12080   auto maybe_layer = maybeCurrentDynamicLayer();
12081   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
12082   int64_t cur_level = maybe_layer->layerId();
12083   if (!isBatchedAtLevel(self, cur_level)) {
12084     return at::_ops::norm_ScalarOpt_dim::call(self, p, dim, keepdim);
12085   }
12086   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
12087   auto results = batch_rule(self_value, self_bdim, p, dim, keepdim);
12088   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
12089 }
12090 template <typename batch_rule_t, batch_rule_t batch_rule>
12091 at::Tensor norm_names_ScalarOpt_dim_dtype_generated_plumbing(const at::Tensor & self, const ::std::optional<at::Scalar> & p, at::DimnameList dim, bool keepdim, at::ScalarType dtype) {
12092   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
12093   auto maybe_layer = maybeCurrentDynamicLayer();
12094   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
12095   int64_t cur_level = maybe_layer->layerId();
12096   if (!isBatchedAtLevel(self, cur_level)) {
12097     return at::_ops::norm_names_ScalarOpt_dim_dtype::call(self, p, dim, keepdim, dtype);
12098   }
12099   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
12100   auto results = batch_rule(self_value, self_bdim, p, dim, keepdim, dtype);
12101   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
12102 }
12103 template <typename batch_rule_t, batch_rule_t batch_rule>
12104 at::Tensor norm_names_ScalarOpt_dim_generated_plumbing(const at::Tensor & self, const ::std::optional<at::Scalar> & p, at::DimnameList dim, bool keepdim) {
12105   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
12106   auto maybe_layer = maybeCurrentDynamicLayer();
12107   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
12108   int64_t cur_level = maybe_layer->layerId();
12109   if (!isBatchedAtLevel(self, cur_level)) {
12110     return at::_ops::norm_names_ScalarOpt_dim::call(self, p, dim, keepdim);
12111   }
12112   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
12113   auto results = batch_rule(self_value, self_bdim, p, dim, keepdim);
12114   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
12115 }
12116 template <typename batch_rule_t, batch_rule_t batch_rule>
12117 ::std::tuple<at::Tensor,at::Tensor> frexp_Tensor_generated_plumbing(const at::Tensor & self) {
12118   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
12119   auto maybe_layer = maybeCurrentDynamicLayer();
12120   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
12121   int64_t cur_level = maybe_layer->layerId();
12122   if (!isBatchedAtLevel(self, cur_level)) {
12123     return at::_ops::frexp_Tensor::call(self);
12124   }
12125   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
12126   auto results = batch_rule(self_value, self_bdim);
12127   return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
12128 }
12129 template <typename batch_rule_t, batch_rule_t batch_rule>
12130 at::Tensor frobenius_norm_dim_generated_plumbing(const at::Tensor & self, at::IntArrayRef dim, bool keepdim) {
12131   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
12132   auto maybe_layer = maybeCurrentDynamicLayer();
12133   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
12134   int64_t cur_level = maybe_layer->layerId();
12135   if (!isBatchedAtLevel(self, cur_level)) {
12136     return at::_ops::frobenius_norm_dim::call(self, dim, keepdim);
12137   }
12138   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
12139   auto results = batch_rule(self_value, self_bdim, dim, keepdim);
12140   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
12141 }
12142 template <typename batch_rule_t, batch_rule_t batch_rule>
12143 at::Tensor nuclear_norm_generated_plumbing(const at::Tensor & self, bool keepdim) {
12144   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
12145   auto maybe_layer = maybeCurrentDynamicLayer();
12146   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
12147   int64_t cur_level = maybe_layer->layerId();
12148   if (!isBatchedAtLevel(self, cur_level)) {
12149     return at::_ops::nuclear_norm::call(self, keepdim);
12150   }
12151   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
12152   auto results = batch_rule(self_value, self_bdim, keepdim);
12153   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
12154 }
12155 template <typename batch_rule_t, batch_rule_t batch_rule>
12156 at::Tensor nuclear_norm_dim_generated_plumbing(const at::Tensor & self, at::IntArrayRef dim, bool keepdim) {
12157   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
12158   auto maybe_layer = maybeCurrentDynamicLayer();
12159   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
12160   int64_t cur_level = maybe_layer->layerId();
12161   if (!isBatchedAtLevel(self, cur_level)) {
12162     return at::_ops::nuclear_norm_dim::call(self, dim, keepdim);
12163   }
12164   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
12165   auto results = batch_rule(self_value, self_bdim, dim, keepdim);
12166   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
12167 }
12168 template <typename batch_rule_t, batch_rule_t batch_rule>
12169 at::Tensor clone_generated_plumbing(const at::Tensor & self, ::std::optional<at::MemoryFormat> memory_format) {
12170   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
12171   auto maybe_layer = maybeCurrentDynamicLayer();
12172   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
12173   int64_t cur_level = maybe_layer->layerId();
12174   if (!isBatchedAtLevel(self, cur_level)) {
12175     return at::_ops::clone::call(self, memory_format);
12176   }
12177   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
12178   auto results = batch_rule(self_value, self_bdim, memory_format);
12179   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
12180 }
12181 template <typename batch_rule_t, batch_rule_t batch_rule>
12182 at::Tensor positive_generated_plumbing(const at::Tensor & self) {
12183   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
12184   auto maybe_layer = maybeCurrentDynamicLayer();
12185   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
12186   int64_t cur_level = maybe_layer->layerId();
12187   if (!isBatchedAtLevel(self, cur_level)) {
12188     return at::_ops::positive::call(self);
12189   }
12190   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
12191   auto results = batch_rule(self_value, self_bdim);
12192   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
12193 }
12194 template <typename batch_rule_t, batch_rule_t batch_rule>
12195 const at::Tensor & resize_as_sparse__generated_plumbing(const at::Tensor & self, const at::Tensor & the_template) {
12196   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
12197   auto maybe_layer = maybeCurrentDynamicLayer();
12198   vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
12199   int64_t cur_level = maybe_layer->layerId();
12200   if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(the_template, cur_level)) {
12201     return at::_ops::resize_as_sparse_::call(self, the_template);
12202   }
12203   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
12204   auto [the_template_value, the_template_bdim] = unwrapTensorAtLevel(the_template, cur_level);
12205   batch_rule(self_value, self_bdim, the_template_value, the_template_bdim);
12206   return self;
12207 }
12208 template <typename batch_rule_t, batch_rule_t batch_rule>
12209 at::Tensor & zero__generated_plumbing(at::Tensor & self) {
12210   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
12211   auto maybe_layer = maybeCurrentDynamicLayer();
12212   vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
12213   int64_t cur_level = maybe_layer->layerId();
12214   if (!isBatchedAtLevel(self, cur_level)) {
12215     return at::_ops::zero_::call(self);
12216   }
12217   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
12218   batch_rule(self_value, self_bdim);
12219   return self;
12220 }
12221 template <typename batch_rule_t, batch_rule_t batch_rule>
12222 at::Tensor sub_Tensor_generated_plumbing(const at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha) {
12223   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
12224   auto maybe_layer = maybeCurrentDynamicLayer();
12225   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
12226   int64_t cur_level = maybe_layer->layerId();
12227   if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
12228     return at::_ops::sub_Tensor::call(self, other, alpha);
12229   }
12230   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
12231   auto [other_value, other_bdim] = unwrapTensorAtLevel(other, cur_level);
12232   auto results = batch_rule(self_value, self_bdim, other_value, other_bdim, alpha);
12233   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
12234 }
12235 template <typename batch_rule_t, batch_rule_t batch_rule>
12236 at::Tensor & sub__Tensor_generated_plumbing(at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha) {
12237   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
12238   auto maybe_layer = maybeCurrentDynamicLayer();
12239   vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
12240   int64_t cur_level = maybe_layer->layerId();
12241   if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
12242     return at::_ops::sub__Tensor::call(self, other, alpha);
12243   }
12244   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
12245   auto [other_value, other_bdim] = unwrapTensorAtLevel(other, cur_level);
12246   batch_rule(self_value, self_bdim, other_value, other_bdim, alpha);
12247   return self;
12248 }
12249 template <typename batch_rule_t, batch_rule_t batch_rule>
12250 at::Tensor sub_Scalar_generated_plumbing(const at::Tensor & self, const at::Scalar & other, const at::Scalar & alpha) {
12251   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
12252   auto maybe_layer = maybeCurrentDynamicLayer();
12253   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
12254   int64_t cur_level = maybe_layer->layerId();
12255   if (!isBatchedAtLevel(self, cur_level)) {
12256     return at::_ops::sub_Scalar::call(self, other, alpha);
12257   }
12258   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
12259   auto results = batch_rule(self_value, self_bdim, other, alpha);
12260   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
12261 }
12262 template <typename batch_rule_t, batch_rule_t batch_rule>
12263 at::Tensor & sub__Scalar_generated_plumbing(at::Tensor & self, const at::Scalar & other, const at::Scalar & alpha) {
12264   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
12265   auto maybe_layer = maybeCurrentDynamicLayer();
12266   vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
12267   int64_t cur_level = maybe_layer->layerId();
12268   if (!isBatchedAtLevel(self, cur_level)) {
12269     return at::_ops::sub__Scalar::call(self, other, alpha);
12270   }
12271   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
12272   batch_rule(self_value, self_bdim, other, alpha);
12273   return self;
12274 }
12275 template <typename batch_rule_t, batch_rule_t batch_rule>
12276 at::Tensor subtract_Tensor_generated_plumbing(const at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha) {
12277   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
12278   auto maybe_layer = maybeCurrentDynamicLayer();
12279   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
12280   int64_t cur_level = maybe_layer->layerId();
12281   if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
12282     return at::_ops::subtract_Tensor::call(self, other, alpha);
12283   }
12284   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
12285   auto [other_value, other_bdim] = unwrapTensorAtLevel(other, cur_level);
12286   auto results = batch_rule(self_value, self_bdim, other_value, other_bdim, alpha);
12287   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
12288 }
12289 template <typename batch_rule_t, batch_rule_t batch_rule>
12290 at::Tensor & subtract__Tensor_generated_plumbing(at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha) {
12291   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
12292   auto maybe_layer = maybeCurrentDynamicLayer();
12293   vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
12294   int64_t cur_level = maybe_layer->layerId();
12295   if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
12296     return at::_ops::subtract__Tensor::call(self, other, alpha);
12297   }
12298   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
12299   auto [other_value, other_bdim] = unwrapTensorAtLevel(other, cur_level);
12300   batch_rule(self_value, self_bdim, other_value, other_bdim, alpha);
12301   return self;
12302 }
12303 template <typename batch_rule_t, batch_rule_t batch_rule>
12304 at::Tensor subtract_Scalar_generated_plumbing(const at::Tensor & self, const at::Scalar & other, const at::Scalar & alpha) {
12305   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
12306   auto maybe_layer = maybeCurrentDynamicLayer();
12307   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
12308   int64_t cur_level = maybe_layer->layerId();
12309   if (!isBatchedAtLevel(self, cur_level)) {
12310     return at::_ops::subtract_Scalar::call(self, other, alpha);
12311   }
12312   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
12313   auto results = batch_rule(self_value, self_bdim, other, alpha);
12314   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
12315 }
12316 template <typename batch_rule_t, batch_rule_t batch_rule>
12317 at::Tensor & subtract__Scalar_generated_plumbing(at::Tensor & self, const at::Scalar & other, const at::Scalar & alpha) {
12318   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
12319   auto maybe_layer = maybeCurrentDynamicLayer();
12320   vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
12321   int64_t cur_level = maybe_layer->layerId();
12322   if (!isBatchedAtLevel(self, cur_level)) {
12323     return at::_ops::subtract__Scalar::call(self, other, alpha);
12324   }
12325   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
12326   batch_rule(self_value, self_bdim, other, alpha);
12327   return self;
12328 }
12329 template <typename batch_rule_t, batch_rule_t batch_rule>
12330 at::Tensor rsub_Tensor_generated_plumbing(const at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha) {
12331   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
12332   auto maybe_layer = maybeCurrentDynamicLayer();
12333   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
12334   int64_t cur_level = maybe_layer->layerId();
12335   if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
12336     return at::_ops::rsub_Tensor::call(self, other, alpha);
12337   }
12338   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
12339   auto [other_value, other_bdim] = unwrapTensorAtLevel(other, cur_level);
12340   auto results = batch_rule(self_value, self_bdim, other_value, other_bdim, alpha);
12341   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
12342 }
12343 template <typename batch_rule_t, batch_rule_t batch_rule>
12344 at::Tensor heaviside_generated_plumbing(const at::Tensor & self, const at::Tensor & values) {
12345   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
12346   auto maybe_layer = maybeCurrentDynamicLayer();
12347   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
12348   int64_t cur_level = maybe_layer->layerId();
12349   if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(values, cur_level)) {
12350     return at::_ops::heaviside::call(self, values);
12351   }
12352   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
12353   auto [values_value, values_bdim] = unwrapTensorAtLevel(values, cur_level);
12354   auto results = batch_rule(self_value, self_bdim, values_value, values_bdim);
12355   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
12356 }
12357 template <typename batch_rule_t, batch_rule_t batch_rule>
12358 at::Tensor & heaviside__generated_plumbing(at::Tensor & self, const at::Tensor & values) {
12359   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
12360   auto maybe_layer = maybeCurrentDynamicLayer();
12361   vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
12362   int64_t cur_level = maybe_layer->layerId();
12363   if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(values, cur_level)) {
12364     return at::_ops::heaviside_::call(self, values);
12365   }
12366   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
12367   auto [values_value, values_bdim] = unwrapTensorAtLevel(values, cur_level);
12368   batch_rule(self_value, self_bdim, values_value, values_bdim);
12369   return self;
12370 }
12371 template <typename batch_rule_t, batch_rule_t batch_rule>
12372 at::Tensor rsub_Scalar_generated_plumbing(const at::Tensor & self, const at::Scalar & other, const at::Scalar & alpha) {
12373   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
12374   auto maybe_layer = maybeCurrentDynamicLayer();
12375   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
12376   int64_t cur_level = maybe_layer->layerId();
12377   if (!isBatchedAtLevel(self, cur_level)) {
12378     return at::_ops::rsub_Scalar::call(self, other, alpha);
12379   }
12380   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
12381   auto results = batch_rule(self_value, self_bdim, other, alpha);
12382   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
12383 }
12384 template <typename batch_rule_t, batch_rule_t batch_rule>
12385 at::Tensor _sparse_addmm_generated_plumbing(const at::Tensor & self, const at::Tensor & mat1, const at::Tensor & mat2, const at::Scalar & beta, const at::Scalar & alpha) {
12386   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
12387   auto maybe_layer = maybeCurrentDynamicLayer();
12388   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
12389   int64_t cur_level = maybe_layer->layerId();
12390   if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(mat1, cur_level) && !isBatchedAtLevel(mat2, cur_level)) {
12391     return at::_ops::_sparse_addmm::call(self, mat1, mat2, beta, alpha);
12392   }
12393   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
12394   auto [mat1_value, mat1_bdim] = unwrapTensorAtLevel(mat1, cur_level);
12395   auto [mat2_value, mat2_bdim] = unwrapTensorAtLevel(mat2, cur_level);
12396   auto results = batch_rule(self_value, self_bdim, mat1_value, mat1_bdim, mat2_value, mat2_bdim, beta, alpha);
12397   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
12398 }
12399 template <typename batch_rule_t, batch_rule_t batch_rule>
12400 at::Tensor sparse_sampled_addmm_generated_plumbing(const at::Tensor & self, const at::Tensor & mat1, const at::Tensor & mat2, const at::Scalar & beta, const at::Scalar & alpha) {
12401   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
12402   auto maybe_layer = maybeCurrentDynamicLayer();
12403   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
12404   int64_t cur_level = maybe_layer->layerId();
12405   if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(mat1, cur_level) && !isBatchedAtLevel(mat2, cur_level)) {
12406     return at::_ops::sparse_sampled_addmm::call(self, mat1, mat2, beta, alpha);
12407   }
12408   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
12409   auto [mat1_value, mat1_bdim] = unwrapTensorAtLevel(mat1, cur_level);
12410   auto [mat2_value, mat2_bdim] = unwrapTensorAtLevel(mat2, cur_level);
12411   auto results = batch_rule(self_value, self_bdim, mat1_value, mat1_bdim, mat2_value, mat2_bdim, beta, alpha);
12412   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
12413 }
12414 template <typename batch_rule_t, batch_rule_t batch_rule>
12415 ::std::tuple<at::Tensor,at::Tensor> _sparse_mm_reduce_impl_generated_plumbing(const at::Tensor & self, const at::Tensor & other, c10::string_view reduce) {
12416   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
12417   auto maybe_layer = maybeCurrentDynamicLayer();
12418   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
12419   int64_t cur_level = maybe_layer->layerId();
12420   if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
12421     return at::_ops::_sparse_mm_reduce_impl::call(self, other, reduce);
12422   }
12423   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
12424   auto [other_value, other_bdim] = unwrapTensorAtLevel(other, cur_level);
12425   auto results = batch_rule(self_value, self_bdim, other_value, other_bdim, reduce);
12426   return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
12427 }
12428 template <typename batch_rule_t, batch_rule_t batch_rule>
12429 ::std::tuple<at::Tensor,at::Tensor> _sparse_mm_reduce_impl_backward_generated_plumbing(const at::Tensor & self, const at::Tensor & grad_out, const at::Tensor & weight, c10::string_view reduce, const at::Tensor & arg_out, ::std::array<bool,2> output_mask) {
12430   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
12431   auto maybe_layer = maybeCurrentDynamicLayer();
12432   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
12433   int64_t cur_level = maybe_layer->layerId();
12434   if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(grad_out, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(arg_out, cur_level)) {
12435     return at::_ops::_sparse_mm_reduce_impl_backward::call(self, grad_out, weight, reduce, arg_out, output_mask);
12436   }
12437   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
12438   auto [grad_out_value, grad_out_bdim] = unwrapTensorAtLevel(grad_out, cur_level);
12439   auto [weight_value, weight_bdim] = unwrapTensorAtLevel(weight, cur_level);
12440   auto [arg_out_value, arg_out_bdim] = unwrapTensorAtLevel(arg_out, cur_level);
12441   auto results = batch_rule(self_value, self_bdim, grad_out_value, grad_out_bdim, weight_value, weight_bdim, reduce, arg_out_value, arg_out_bdim, output_mask);
12442   return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
12443 }
12444 template <typename batch_rule_t, batch_rule_t batch_rule>
12445 at::Tensor addmm_generated_plumbing(const at::Tensor & self, const at::Tensor & mat1, const at::Tensor & mat2, const at::Scalar & beta, const at::Scalar & alpha) {
12446   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
12447   auto maybe_layer = maybeCurrentDynamicLayer();
12448   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
12449   int64_t cur_level = maybe_layer->layerId();
12450   if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(mat1, cur_level) && !isBatchedAtLevel(mat2, cur_level)) {
12451     return at::_ops::addmm::call(self, mat1, mat2, beta, alpha);
12452   }
12453   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
12454   auto [mat1_value, mat1_bdim] = unwrapTensorAtLevel(mat1, cur_level);
12455   auto [mat2_value, mat2_bdim] = unwrapTensorAtLevel(mat2, cur_level);
12456   auto results = batch_rule(self_value, self_bdim, mat1_value, mat1_bdim, mat2_value, mat2_bdim, beta, alpha);
12457   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
12458 }
12459 template <typename batch_rule_t, batch_rule_t batch_rule>
12460 at::Tensor addmm_dtype_generated_plumbing(const at::Tensor & self, const at::Tensor & mat1, const at::Tensor & mat2, at::ScalarType out_dtype, const at::Scalar & beta, const at::Scalar & alpha) {
12461   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
12462   auto maybe_layer = maybeCurrentDynamicLayer();
12463   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
12464   int64_t cur_level = maybe_layer->layerId();
12465   if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(mat1, cur_level) && !isBatchedAtLevel(mat2, cur_level)) {
12466     return at::_ops::addmm_dtype::call(self, mat1, mat2, out_dtype, beta, alpha);
12467   }
12468   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
12469   auto [mat1_value, mat1_bdim] = unwrapTensorAtLevel(mat1, cur_level);
12470   auto [mat2_value, mat2_bdim] = unwrapTensorAtLevel(mat2, cur_level);
12471   auto results = batch_rule(self_value, self_bdim, mat1_value, mat1_bdim, mat2_value, mat2_bdim, out_dtype, beta, alpha);
12472   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
12473 }
12474 template <typename batch_rule_t, batch_rule_t batch_rule>
12475 at::Tensor & addmm__generated_plumbing(at::Tensor & self, const at::Tensor & mat1, const at::Tensor & mat2, const at::Scalar & beta, const at::Scalar & alpha) {
12476   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
12477   auto maybe_layer = maybeCurrentDynamicLayer();
12478   vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
12479   int64_t cur_level = maybe_layer->layerId();
12480   if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(mat1, cur_level) && !isBatchedAtLevel(mat2, cur_level)) {
12481     return at::_ops::addmm_::call(self, mat1, mat2, beta, alpha);
12482   }
12483   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
12484   auto [mat1_value, mat1_bdim] = unwrapTensorAtLevel(mat1, cur_level);
12485   auto [mat2_value, mat2_bdim] = unwrapTensorAtLevel(mat2, cur_level);
12486   batch_rule(self_value, self_bdim, mat1_value, mat1_bdim, mat2_value, mat2_bdim, beta, alpha);
12487   return self;
12488 }
12489 template <typename batch_rule_t, batch_rule_t batch_rule>
12490 at::Tensor _addmm_activation_generated_plumbing(const at::Tensor & self, const at::Tensor & mat1, const at::Tensor & mat2, const at::Scalar & beta, const at::Scalar & alpha, bool use_gelu) {
12491   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
12492   auto maybe_layer = maybeCurrentDynamicLayer();
12493   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
12494   int64_t cur_level = maybe_layer->layerId();
12495   if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(mat1, cur_level) && !isBatchedAtLevel(mat2, cur_level)) {
12496     return at::_ops::_addmm_activation::call(self, mat1, mat2, beta, alpha, use_gelu);
12497   }
12498   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
12499   auto [mat1_value, mat1_bdim] = unwrapTensorAtLevel(mat1, cur_level);
12500   auto [mat2_value, mat2_bdim] = unwrapTensorAtLevel(mat2, cur_level);
12501   auto results = batch_rule(self_value, self_bdim, mat1_value, mat1_bdim, mat2_value, mat2_bdim, beta, alpha, use_gelu);
12502   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
12503 }
12504 template <typename batch_rule_t, batch_rule_t batch_rule>
12505 at::Tensor _scaled_mm_generated_plumbing(const at::Tensor & self, const at::Tensor & mat2, const at::Tensor & scale_a, const at::Tensor & scale_b, const ::std::optional<at::Tensor> & bias, const ::std::optional<at::Tensor> & scale_result, ::std::optional<at::ScalarType> out_dtype, bool use_fast_accum) {
12506   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
12507   auto maybe_layer = maybeCurrentDynamicLayer();
12508   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
12509   int64_t cur_level = maybe_layer->layerId();
12510   if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(mat2, cur_level) && !isBatchedAtLevel(scale_a, cur_level) && !isBatchedAtLevel(scale_b, cur_level) && !isBatchedAtLevel(bias, cur_level) && !isBatchedAtLevel(scale_result, cur_level)) {
12511     return at::_ops::_scaled_mm::call(self, mat2, scale_a, scale_b, bias, scale_result, out_dtype, use_fast_accum);
12512   }
12513   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
12514   auto [mat2_value, mat2_bdim] = unwrapTensorAtLevel(mat2, cur_level);
12515   auto [scale_a_value, scale_a_bdim] = unwrapTensorAtLevel(scale_a, cur_level);
12516   auto [scale_b_value, scale_b_bdim] = unwrapTensorAtLevel(scale_b, cur_level);
12517   std::optional<Tensor> bias_value;
12518   std::optional<int64_t> bias_bdim;
12519   if (bias) {
12520       std::tie(bias_value, bias_bdim) = unwrapTensorAtLevel(bias.value(), cur_level);
12521   }
12522   std::optional<Tensor> scale_result_value;
12523   std::optional<int64_t> scale_result_bdim;
12524   if (scale_result) {
12525       std::tie(scale_result_value, scale_result_bdim) = unwrapTensorAtLevel(scale_result.value(), cur_level);
12526   }
12527   auto results = batch_rule(self_value, self_bdim, mat2_value, mat2_bdim, scale_a_value, scale_a_bdim, scale_b_value, scale_b_bdim, bias_value, bias_bdim, scale_result_value, scale_result_bdim, out_dtype, use_fast_accum);
12528   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
12529 }
12530 template <typename batch_rule_t, batch_rule_t batch_rule>
12531 at::Tensor _scaled_grouped_mm_generated_plumbing(const at::Tensor & self, const at::Tensor & mat2, const at::Tensor & scale_a, const at::Tensor & scale_b, const ::std::optional<at::Tensor> & offs, const ::std::optional<at::Tensor> & bias, const ::std::optional<at::Tensor> & scale_result, ::std::optional<at::ScalarType> out_dtype, bool use_fast_accum) {
12532   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
12533   auto maybe_layer = maybeCurrentDynamicLayer();
12534   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
12535   int64_t cur_level = maybe_layer->layerId();
12536   if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(mat2, cur_level) && !isBatchedAtLevel(scale_a, cur_level) && !isBatchedAtLevel(scale_b, cur_level) && !isBatchedAtLevel(offs, cur_level) && !isBatchedAtLevel(bias, cur_level) && !isBatchedAtLevel(scale_result, cur_level)) {
12537     return at::_ops::_scaled_grouped_mm::call(self, mat2, scale_a, scale_b, offs, bias, scale_result, out_dtype, use_fast_accum);
12538   }
12539   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
12540   auto [mat2_value, mat2_bdim] = unwrapTensorAtLevel(mat2, cur_level);
12541   auto [scale_a_value, scale_a_bdim] = unwrapTensorAtLevel(scale_a, cur_level);
12542   auto [scale_b_value, scale_b_bdim] = unwrapTensorAtLevel(scale_b, cur_level);
12543   std::optional<Tensor> offs_value;
12544   std::optional<int64_t> offs_bdim;
12545   if (offs) {
12546       std::tie(offs_value, offs_bdim) = unwrapTensorAtLevel(offs.value(), cur_level);
12547   }
12548   std::optional<Tensor> bias_value;
12549   std::optional<int64_t> bias_bdim;
12550   if (bias) {
12551       std::tie(bias_value, bias_bdim) = unwrapTensorAtLevel(bias.value(), cur_level);
12552   }
12553   std::optional<Tensor> scale_result_value;
12554   std::optional<int64_t> scale_result_bdim;
12555   if (scale_result) {
12556       std::tie(scale_result_value, scale_result_bdim) = unwrapTensorAtLevel(scale_result.value(), cur_level);
12557   }
12558   auto results = batch_rule(self_value, self_bdim, mat2_value, mat2_bdim, scale_a_value, scale_a_bdim, scale_b_value, scale_b_bdim, offs_value, offs_bdim, bias_value, bias_bdim, scale_result_value, scale_result_bdim, out_dtype, use_fast_accum);
12559   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
12560 }
12561 template <typename batch_rule_t, batch_rule_t batch_rule>
12562 at::Tensor _grouped_mm_generated_plumbing(const at::Tensor & self, const at::Tensor & mat2, const ::std::optional<at::Tensor> & offs, const ::std::optional<at::Tensor> & bias, ::std::optional<at::ScalarType> out_dtype) {
12563   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
12564   auto maybe_layer = maybeCurrentDynamicLayer();
12565   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
12566   int64_t cur_level = maybe_layer->layerId();
12567   if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(mat2, cur_level) && !isBatchedAtLevel(offs, cur_level) && !isBatchedAtLevel(bias, cur_level)) {
12568     return at::_ops::_grouped_mm::call(self, mat2, offs, bias, out_dtype);
12569   }
12570   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
12571   auto [mat2_value, mat2_bdim] = unwrapTensorAtLevel(mat2, cur_level);
12572   std::optional<Tensor> offs_value;
12573   std::optional<int64_t> offs_bdim;
12574   if (offs) {
12575       std::tie(offs_value, offs_bdim) = unwrapTensorAtLevel(offs.value(), cur_level);
12576   }
12577   std::optional<Tensor> bias_value;
12578   std::optional<int64_t> bias_bdim;
12579   if (bias) {
12580       std::tie(bias_value, bias_bdim) = unwrapTensorAtLevel(bias.value(), cur_level);
12581   }
12582   auto results = batch_rule(self_value, self_bdim, mat2_value, mat2_bdim, offs_value, offs_bdim, bias_value, bias_bdim, out_dtype);
12583   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
12584 }
12585 template <typename batch_rule_t, batch_rule_t batch_rule>
12586 at::Tensor sparse_compressed_tensor_comp_plain_value_size_generated_plumbing(const at::Tensor & compressed_indices, const at::Tensor & plain_indices, const at::Tensor & values, c10::SymIntArrayRef size, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory) {
12587   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
12588   auto maybe_layer = maybeCurrentDynamicLayer();
12589   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
12590   int64_t cur_level = maybe_layer->layerId();
12591   if (!isBatchedAtLevel(compressed_indices, cur_level) && !isBatchedAtLevel(plain_indices, cur_level) && !isBatchedAtLevel(values, cur_level)) {
12592     return at::_ops::sparse_compressed_tensor_comp_plain_value_size::call(compressed_indices, plain_indices, values, size, dtype, layout, device, pin_memory);
12593   }
12594   auto [compressed_indices_value, compressed_indices_bdim] = unwrapTensorAtLevel(compressed_indices, cur_level);
12595   auto [plain_indices_value, plain_indices_bdim] = unwrapTensorAtLevel(plain_indices, cur_level);
12596   auto [values_value, values_bdim] = unwrapTensorAtLevel(values, cur_level);
12597   auto results = batch_rule(compressed_indices_value, compressed_indices_bdim, plain_indices_value, plain_indices_bdim, values_value, values_bdim, size, dtype, layout, device, pin_memory);
12598   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
12599 }
12600 template <typename batch_rule_t, batch_rule_t batch_rule>
12601 at::Tensor sparse_csr_tensor_crow_col_value_size_generated_plumbing(const at::Tensor & crow_indices, const at::Tensor & col_indices, const at::Tensor & values, at::IntArrayRef size, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory) {
12602   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
12603   auto maybe_layer = maybeCurrentDynamicLayer();
12604   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
12605   int64_t cur_level = maybe_layer->layerId();
12606   if (!isBatchedAtLevel(crow_indices, cur_level) && !isBatchedAtLevel(col_indices, cur_level) && !isBatchedAtLevel(values, cur_level)) {
12607     return at::_ops::sparse_csr_tensor_crow_col_value_size::call(crow_indices, col_indices, values, size, dtype, layout, device, pin_memory);
12608   }
12609   auto [crow_indices_value, crow_indices_bdim] = unwrapTensorAtLevel(crow_indices, cur_level);
12610   auto [col_indices_value, col_indices_bdim] = unwrapTensorAtLevel(col_indices, cur_level);
12611   auto [values_value, values_bdim] = unwrapTensorAtLevel(values, cur_level);
12612   auto results = batch_rule(crow_indices_value, crow_indices_bdim, col_indices_value, col_indices_bdim, values_value, values_bdim, size, dtype, layout, device, pin_memory);
12613   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
12614 }
12615 template <typename batch_rule_t, batch_rule_t batch_rule>
12616 at::Tensor sparse_csc_tensor_ccol_row_value_size_generated_plumbing(const at::Tensor & ccol_indices, const at::Tensor & row_indices, const at::Tensor & values, at::IntArrayRef size, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory) {
12617   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
12618   auto maybe_layer = maybeCurrentDynamicLayer();
12619   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
12620   int64_t cur_level = maybe_layer->layerId();
12621   if (!isBatchedAtLevel(ccol_indices, cur_level) && !isBatchedAtLevel(row_indices, cur_level) && !isBatchedAtLevel(values, cur_level)) {
12622     return at::_ops::sparse_csc_tensor_ccol_row_value_size::call(ccol_indices, row_indices, values, size, dtype, layout, device, pin_memory);
12623   }
12624   auto [ccol_indices_value, ccol_indices_bdim] = unwrapTensorAtLevel(ccol_indices, cur_level);
12625   auto [row_indices_value, row_indices_bdim] = unwrapTensorAtLevel(row_indices, cur_level);
12626   auto [values_value, values_bdim] = unwrapTensorAtLevel(values, cur_level);
12627   auto results = batch_rule(ccol_indices_value, ccol_indices_bdim, row_indices_value, row_indices_bdim, values_value, values_bdim, size, dtype, layout, device, pin_memory);
12628   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
12629 }
12630 template <typename batch_rule_t, batch_rule_t batch_rule>
12631 at::Tensor sparse_bsr_tensor_crow_col_value_size_generated_plumbing(const at::Tensor & crow_indices, const at::Tensor & col_indices, const at::Tensor & values, at::IntArrayRef size, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory) {
12632   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
12633   auto maybe_layer = maybeCurrentDynamicLayer();
12634   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
12635   int64_t cur_level = maybe_layer->layerId();
12636   if (!isBatchedAtLevel(crow_indices, cur_level) && !isBatchedAtLevel(col_indices, cur_level) && !isBatchedAtLevel(values, cur_level)) {
12637     return at::_ops::sparse_bsr_tensor_crow_col_value_size::call(crow_indices, col_indices, values, size, dtype, layout, device, pin_memory);
12638   }
12639   auto [crow_indices_value, crow_indices_bdim] = unwrapTensorAtLevel(crow_indices, cur_level);
12640   auto [col_indices_value, col_indices_bdim] = unwrapTensorAtLevel(col_indices, cur_level);
12641   auto [values_value, values_bdim] = unwrapTensorAtLevel(values, cur_level);
12642   auto results = batch_rule(crow_indices_value, crow_indices_bdim, col_indices_value, col_indices_bdim, values_value, values_bdim, size, dtype, layout, device, pin_memory);
12643   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
12644 }
12645 template <typename batch_rule_t, batch_rule_t batch_rule>
12646 at::Tensor sparse_bsc_tensor_ccol_row_value_size_generated_plumbing(const at::Tensor & ccol_indices, const at::Tensor & row_indices, const at::Tensor & values, at::IntArrayRef size, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory) {
12647   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
12648   auto maybe_layer = maybeCurrentDynamicLayer();
12649   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
12650   int64_t cur_level = maybe_layer->layerId();
12651   if (!isBatchedAtLevel(ccol_indices, cur_level) && !isBatchedAtLevel(row_indices, cur_level) && !isBatchedAtLevel(values, cur_level)) {
12652     return at::_ops::sparse_bsc_tensor_ccol_row_value_size::call(ccol_indices, row_indices, values, size, dtype, layout, device, pin_memory);
12653   }
12654   auto [ccol_indices_value, ccol_indices_bdim] = unwrapTensorAtLevel(ccol_indices, cur_level);
12655   auto [row_indices_value, row_indices_bdim] = unwrapTensorAtLevel(row_indices, cur_level);
12656   auto [values_value, values_bdim] = unwrapTensorAtLevel(values, cur_level);
12657   auto results = batch_rule(ccol_indices_value, ccol_indices_bdim, row_indices_value, row_indices_bdim, values_value, values_bdim, size, dtype, layout, device, pin_memory);
12658   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
12659 }
12660 template <typename batch_rule_t, batch_rule_t batch_rule>
12661 at::Tensor sparse_compressed_tensor_comp_plain_value_generated_plumbing(const at::Tensor & compressed_indices, const at::Tensor & plain_indices, const at::Tensor & values, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory) {
12662   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
12663   auto maybe_layer = maybeCurrentDynamicLayer();
12664   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
12665   int64_t cur_level = maybe_layer->layerId();
12666   if (!isBatchedAtLevel(compressed_indices, cur_level) && !isBatchedAtLevel(plain_indices, cur_level) && !isBatchedAtLevel(values, cur_level)) {
12667     return at::_ops::sparse_compressed_tensor_comp_plain_value::call(compressed_indices, plain_indices, values, dtype, layout, device, pin_memory);
12668   }
12669   auto [compressed_indices_value, compressed_indices_bdim] = unwrapTensorAtLevel(compressed_indices, cur_level);
12670   auto [plain_indices_value, plain_indices_bdim] = unwrapTensorAtLevel(plain_indices, cur_level);
12671   auto [values_value, values_bdim] = unwrapTensorAtLevel(values, cur_level);
12672   auto results = batch_rule(compressed_indices_value, compressed_indices_bdim, plain_indices_value, plain_indices_bdim, values_value, values_bdim, dtype, layout, device, pin_memory);
12673   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
12674 }
12675 template <typename batch_rule_t, batch_rule_t batch_rule>
12676 at::Tensor sparse_csr_tensor_crow_col_value_generated_plumbing(const at::Tensor & crow_indices, const at::Tensor & col_indices, const at::Tensor & values, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory) {
12677   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
12678   auto maybe_layer = maybeCurrentDynamicLayer();
12679   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
12680   int64_t cur_level = maybe_layer->layerId();
12681   if (!isBatchedAtLevel(crow_indices, cur_level) && !isBatchedAtLevel(col_indices, cur_level) && !isBatchedAtLevel(values, cur_level)) {
12682     return at::_ops::sparse_csr_tensor_crow_col_value::call(crow_indices, col_indices, values, dtype, layout, device, pin_memory);
12683   }
12684   auto [crow_indices_value, crow_indices_bdim] = unwrapTensorAtLevel(crow_indices, cur_level);
12685   auto [col_indices_value, col_indices_bdim] = unwrapTensorAtLevel(col_indices, cur_level);
12686   auto [values_value, values_bdim] = unwrapTensorAtLevel(values, cur_level);
12687   auto results = batch_rule(crow_indices_value, crow_indices_bdim, col_indices_value, col_indices_bdim, values_value, values_bdim, dtype, layout, device, pin_memory);
12688   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
12689 }
12690 template <typename batch_rule_t, batch_rule_t batch_rule>
12691 at::Tensor sparse_csc_tensor_ccol_row_value_generated_plumbing(const at::Tensor & ccol_indices, const at::Tensor & row_indices, const at::Tensor & values, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory) {
12692   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
12693   auto maybe_layer = maybeCurrentDynamicLayer();
12694   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
12695   int64_t cur_level = maybe_layer->layerId();
12696   if (!isBatchedAtLevel(ccol_indices, cur_level) && !isBatchedAtLevel(row_indices, cur_level) && !isBatchedAtLevel(values, cur_level)) {
12697     return at::_ops::sparse_csc_tensor_ccol_row_value::call(ccol_indices, row_indices, values, dtype, layout, device, pin_memory);
12698   }
12699   auto [ccol_indices_value, ccol_indices_bdim] = unwrapTensorAtLevel(ccol_indices, cur_level);
12700   auto [row_indices_value, row_indices_bdim] = unwrapTensorAtLevel(row_indices, cur_level);
12701   auto [values_value, values_bdim] = unwrapTensorAtLevel(values, cur_level);
12702   auto results = batch_rule(ccol_indices_value, ccol_indices_bdim, row_indices_value, row_indices_bdim, values_value, values_bdim, dtype, layout, device, pin_memory);
12703   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
12704 }
12705 template <typename batch_rule_t, batch_rule_t batch_rule>
12706 at::Tensor sparse_bsr_tensor_crow_col_value_generated_plumbing(const at::Tensor & crow_indices, const at::Tensor & col_indices, const at::Tensor & values, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory) {
12707   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
12708   auto maybe_layer = maybeCurrentDynamicLayer();
12709   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
12710   int64_t cur_level = maybe_layer->layerId();
12711   if (!isBatchedAtLevel(crow_indices, cur_level) && !isBatchedAtLevel(col_indices, cur_level) && !isBatchedAtLevel(values, cur_level)) {
12712     return at::_ops::sparse_bsr_tensor_crow_col_value::call(crow_indices, col_indices, values, dtype, layout, device, pin_memory);
12713   }
12714   auto [crow_indices_value, crow_indices_bdim] = unwrapTensorAtLevel(crow_indices, cur_level);
12715   auto [col_indices_value, col_indices_bdim] = unwrapTensorAtLevel(col_indices, cur_level);
12716   auto [values_value, values_bdim] = unwrapTensorAtLevel(values, cur_level);
12717   auto results = batch_rule(crow_indices_value, crow_indices_bdim, col_indices_value, col_indices_bdim, values_value, values_bdim, dtype, layout, device, pin_memory);
12718   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
12719 }
12720 template <typename batch_rule_t, batch_rule_t batch_rule>
12721 at::Tensor sparse_bsc_tensor_ccol_row_value_generated_plumbing(const at::Tensor & ccol_indices, const at::Tensor & row_indices, const at::Tensor & values, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory) {
12722   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
12723   auto maybe_layer = maybeCurrentDynamicLayer();
12724   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
12725   int64_t cur_level = maybe_layer->layerId();
12726   if (!isBatchedAtLevel(ccol_indices, cur_level) && !isBatchedAtLevel(row_indices, cur_level) && !isBatchedAtLevel(values, cur_level)) {
12727     return at::_ops::sparse_bsc_tensor_ccol_row_value::call(ccol_indices, row_indices, values, dtype, layout, device, pin_memory);
12728   }
12729   auto [ccol_indices_value, ccol_indices_bdim] = unwrapTensorAtLevel(ccol_indices, cur_level);
12730   auto [row_indices_value, row_indices_bdim] = unwrapTensorAtLevel(row_indices, cur_level);
12731   auto [values_value, values_bdim] = unwrapTensorAtLevel(values, cur_level);
12732   auto results = batch_rule(ccol_indices_value, ccol_indices_bdim, row_indices_value, row_indices_bdim, values_value, values_bdim, dtype, layout, device, pin_memory);
12733   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
12734 }
12735 template <typename batch_rule_t, batch_rule_t batch_rule>
12736 at::Tensor _sparse_compressed_tensor_unsafe_generated_plumbing(const at::Tensor & compressed_indices, const at::Tensor & plain_indices, const at::Tensor & values, c10::SymIntArrayRef size, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory) {
12737   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
12738   auto maybe_layer = maybeCurrentDynamicLayer();
12739   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
12740   int64_t cur_level = maybe_layer->layerId();
12741   if (!isBatchedAtLevel(compressed_indices, cur_level) && !isBatchedAtLevel(plain_indices, cur_level) && !isBatchedAtLevel(values, cur_level)) {
12742     return at::_ops::_sparse_compressed_tensor_unsafe::call(compressed_indices, plain_indices, values, size, dtype, layout, device, pin_memory);
12743   }
12744   auto [compressed_indices_value, compressed_indices_bdim] = unwrapTensorAtLevel(compressed_indices, cur_level);
12745   auto [plain_indices_value, plain_indices_bdim] = unwrapTensorAtLevel(plain_indices, cur_level);
12746   auto [values_value, values_bdim] = unwrapTensorAtLevel(values, cur_level);
12747   auto results = batch_rule(compressed_indices_value, compressed_indices_bdim, plain_indices_value, plain_indices_bdim, values_value, values_bdim, size, dtype, layout, device, pin_memory);
12748   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
12749 }
12750 template <typename batch_rule_t, batch_rule_t batch_rule>
12751 at::Tensor _sparse_csr_tensor_unsafe_generated_plumbing(const at::Tensor & crow_indices, const at::Tensor & col_indices, const at::Tensor & values, at::IntArrayRef size, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory) {
12752   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
12753   auto maybe_layer = maybeCurrentDynamicLayer();
12754   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
12755   int64_t cur_level = maybe_layer->layerId();
12756   if (!isBatchedAtLevel(crow_indices, cur_level) && !isBatchedAtLevel(col_indices, cur_level) && !isBatchedAtLevel(values, cur_level)) {
12757     return at::_ops::_sparse_csr_tensor_unsafe::call(crow_indices, col_indices, values, size, dtype, layout, device, pin_memory);
12758   }
12759   auto [crow_indices_value, crow_indices_bdim] = unwrapTensorAtLevel(crow_indices, cur_level);
12760   auto [col_indices_value, col_indices_bdim] = unwrapTensorAtLevel(col_indices, cur_level);
12761   auto [values_value, values_bdim] = unwrapTensorAtLevel(values, cur_level);
12762   auto results = batch_rule(crow_indices_value, crow_indices_bdim, col_indices_value, col_indices_bdim, values_value, values_bdim, size, dtype, layout, device, pin_memory);
12763   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
12764 }
12765 template <typename batch_rule_t, batch_rule_t batch_rule>
12766 at::Tensor _sparse_csc_tensor_unsafe_generated_plumbing(const at::Tensor & ccol_indices, const at::Tensor & row_indices, const at::Tensor & values, at::IntArrayRef size, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory) {
12767   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
12768   auto maybe_layer = maybeCurrentDynamicLayer();
12769   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
12770   int64_t cur_level = maybe_layer->layerId();
12771   if (!isBatchedAtLevel(ccol_indices, cur_level) && !isBatchedAtLevel(row_indices, cur_level) && !isBatchedAtLevel(values, cur_level)) {
12772     return at::_ops::_sparse_csc_tensor_unsafe::call(ccol_indices, row_indices, values, size, dtype, layout, device, pin_memory);
12773   }
12774   auto [ccol_indices_value, ccol_indices_bdim] = unwrapTensorAtLevel(ccol_indices, cur_level);
12775   auto [row_indices_value, row_indices_bdim] = unwrapTensorAtLevel(row_indices, cur_level);
12776   auto [values_value, values_bdim] = unwrapTensorAtLevel(values, cur_level);
12777   auto results = batch_rule(ccol_indices_value, ccol_indices_bdim, row_indices_value, row_indices_bdim, values_value, values_bdim, size, dtype, layout, device, pin_memory);
12778   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
12779 }
// vmap plumbing for aten::_sparse_bsr_tensor_unsafe: unwrap inputs batched at the
// current vmap level, forward values + batch dims to `batch_rule`, and re-wrap the
// result as a batched tensor at the same level.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor _sparse_bsr_tensor_unsafe_generated_plumbing(const at::Tensor & crow_indices, const at::Tensor & col_indices, const at::Tensor & values, at::IntArrayRef size, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory) {
  // Exclude the FuncTorchBatched key for this scope so calls below do not
  // re-enter this batched kernel.
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  // Fast path: nothing is batched at this level, so redispatch to the op directly.
  if (!isBatchedAtLevel(crow_indices, cur_level) && !isBatchedAtLevel(col_indices, cur_level) && !isBatchedAtLevel(values, cur_level)) {
    return at::_ops::_sparse_bsr_tensor_unsafe::call(crow_indices, col_indices, values, size, dtype, layout, device, pin_memory);
  }
  // Strip the BatchedTensor wrapper; each unwrap yields (value, optional batch dim).
  auto [crow_indices_value, crow_indices_bdim] = unwrapTensorAtLevel(crow_indices, cur_level);
  auto [col_indices_value, col_indices_bdim] = unwrapTensorAtLevel(col_indices, cur_level);
  auto [values_value, values_bdim] = unwrapTensorAtLevel(values, cur_level);
  auto results = batch_rule(crow_indices_value, crow_indices_bdim, values_value, values_bdim, size, dtype, layout, device, pin_memory);
  // `results` is (tensor, optional bdim); re-wrap at the current level.
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// vmap plumbing for aten::_sparse_bsc_tensor_unsafe (same pattern as the BSR
// variant above, with ccol/row compressed indices).
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor _sparse_bsc_tensor_unsafe_generated_plumbing(const at::Tensor & ccol_indices, const at::Tensor & row_indices, const at::Tensor & values, at::IntArrayRef size, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(ccol_indices, cur_level) && !isBatchedAtLevel(row_indices, cur_level) && !isBatchedAtLevel(values, cur_level)) {
    return at::_ops::_sparse_bsc_tensor_unsafe::call(ccol_indices, row_indices, values, size, dtype, layout, device, pin_memory);
  }
  auto [ccol_indices_value, ccol_indices_bdim] = unwrapTensorAtLevel(ccol_indices, cur_level);
  auto [row_indices_value, row_indices_bdim] = unwrapTensorAtLevel(row_indices, cur_level);
  auto [values_value, values_bdim] = unwrapTensorAtLevel(values, cur_level);
  auto results = batch_rule(ccol_indices_value, ccol_indices_bdim, row_indices_value, row_indices_bdim, values_value, values_bdim, size, dtype, layout, device, pin_memory);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// vmap plumbing for aten::sparse_coo_tensor.indices: unwrap batched indices/values,
// run the batch rule, and re-wrap the resulting tensor at the current vmap level.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor sparse_coo_tensor_indices_generated_plumbing(const at::Tensor & indices, const at::Tensor & values, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory, ::std::optional<bool> is_coalesced) {
  // Keep FuncTorchBatched excluded so the redispatch below skips this kernel.
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  // Fast path: no batched inputs at this level.
  if (!isBatchedAtLevel(indices, cur_level) && !isBatchedAtLevel(values, cur_level)) {
    return at::_ops::sparse_coo_tensor_indices::call(indices, values, dtype, layout, device, pin_memory, is_coalesced);
  }
  auto [indices_value, indices_bdim] = unwrapTensorAtLevel(indices, cur_level);
  auto [values_value, values_bdim] = unwrapTensorAtLevel(values, cur_level);
  auto results = batch_rule(indices_value, indices_bdim, values_value, values_bdim, dtype, layout, device, pin_memory, is_coalesced);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// vmap plumbing for aten::sparse_coo_tensor.indices_size (adds an explicit `size`).
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor sparse_coo_tensor_indices_size_generated_plumbing(const at::Tensor & indices, const at::Tensor & values, at::IntArrayRef size, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory, ::std::optional<bool> is_coalesced) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(indices, cur_level) && !isBatchedAtLevel(values, cur_level)) {
    return at::_ops::sparse_coo_tensor_indices_size::call(indices, values, size, dtype, layout, device, pin_memory, is_coalesced);
  }
  auto [indices_value, indices_bdim] = unwrapTensorAtLevel(indices, cur_level);
  auto [values_value, values_bdim] = unwrapTensorAtLevel(values, cur_level);
  auto results = batch_rule(indices_value, indices_bdim, values_value, values_bdim, size, dtype, layout, device, pin_memory, is_coalesced);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// vmap plumbing for aten::_sparse_coo_tensor_unsafe (SymInt size; no validation op).
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor _sparse_coo_tensor_unsafe_generated_plumbing(const at::Tensor & indices, const at::Tensor & values, c10::SymIntArrayRef size, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory, ::std::optional<bool> is_coalesced) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(indices, cur_level) && !isBatchedAtLevel(values, cur_level)) {
    return at::_ops::_sparse_coo_tensor_unsafe::call(indices, values, size, dtype, layout, device, pin_memory, is_coalesced);
  }
  auto [indices_value, indices_bdim] = unwrapTensorAtLevel(indices, cur_level);
  auto [values_value, values_bdim] = unwrapTensorAtLevel(values, cur_level);
  auto results = batch_rule(indices_value, indices_bdim, values_value, values_bdim, size, dtype, layout, device, pin_memory, is_coalesced);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// vmap plumbing for aten::_validate_sparse_coo_tensor_args: void op, so there is
// nothing to re-wrap — the batch rule is invoked purely for its checks.
template <typename batch_rule_t, batch_rule_t batch_rule>
void _validate_sparse_coo_tensor_args_generated_plumbing(const at::Tensor & indices, const at::Tensor & values, at::IntArrayRef size, ::std::optional<bool> is_coalesced, ::std::optional<bool> check_pinning) {
  // Exclude FuncTorchBatched so the direct redispatch below bypasses this kernel.
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  // Note the "_no_returns" tag: generated variant for ops with no outputs.
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
  int64_t cur_level = maybe_layer->layerId();
  // Fast path: nothing batched at this level.
  if (!isBatchedAtLevel(indices, cur_level) && !isBatchedAtLevel(values, cur_level)) {
    return at::_ops::_validate_sparse_coo_tensor_args::call(indices, values, size, is_coalesced, check_pinning);
  }
  auto [indices_value, indices_bdim] = unwrapTensorAtLevel(indices, cur_level);
  auto [values_value, values_bdim] = unwrapTensorAtLevel(values, cur_level);
  batch_rule(indices_value, indices_bdim, values_value, values_bdim, size, is_coalesced, check_pinning);
}
// vmap plumbing for aten::_validate_sparse_compressed_tensor_args (void).
template <typename batch_rule_t, batch_rule_t batch_rule>
void _validate_sparse_compressed_tensor_args_generated_plumbing(const at::Tensor & compressed_indices, const at::Tensor & plain_indices, const at::Tensor & values, at::IntArrayRef size, at::Layout layout, ::std::optional<bool> check_pinning) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(compressed_indices, cur_level) && !isBatchedAtLevel(plain_indices, cur_level) && !isBatchedAtLevel(values, cur_level)) {
    return at::_ops::_validate_sparse_compressed_tensor_args::call(compressed_indices, plain_indices, values, size, layout, check_pinning);
  }
  auto [compressed_indices_value, compressed_indices_bdim] = unwrapTensorAtLevel(compressed_indices, cur_level);
  auto [plain_indices_value, plain_indices_bdim] = unwrapTensorAtLevel(plain_indices, cur_level);
  auto [values_value, values_bdim] = unwrapTensorAtLevel(values, cur_level);
  batch_rule(compressed_indices_value, compressed_indices_bdim, plain_indices_value, plain_indices_bdim, values_value, values_bdim, size, layout, check_pinning);
}
// vmap plumbing for aten::_validate_sparse_csr_tensor_args (void).
template <typename batch_rule_t, batch_rule_t batch_rule>
void _validate_sparse_csr_tensor_args_generated_plumbing(const at::Tensor & crow_indices, const at::Tensor & col_indices, const at::Tensor & values, at::IntArrayRef size, ::std::optional<bool> check_pinning) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(crow_indices, cur_level) && !isBatchedAtLevel(col_indices, cur_level) && !isBatchedAtLevel(values, cur_level)) {
    return at::_ops::_validate_sparse_csr_tensor_args::call(crow_indices, col_indices, values, size, check_pinning);
  }
  auto [crow_indices_value, crow_indices_bdim] = unwrapTensorAtLevel(crow_indices, cur_level);
  auto [col_indices_value, col_indices_bdim] = unwrapTensorAtLevel(col_indices, cur_level);
  auto [values_value, values_bdim] = unwrapTensorAtLevel(values, cur_level);
  batch_rule(crow_indices_value, crow_indices_bdim, col_indices_value, col_indices_bdim, values_value, values_bdim, size, check_pinning);
}
// vmap plumbing for aten::_validate_sparse_csc_tensor_args (void).
template <typename batch_rule_t, batch_rule_t batch_rule>
void _validate_sparse_csc_tensor_args_generated_plumbing(const at::Tensor & ccol_indices, const at::Tensor & row_indices, const at::Tensor & values, at::IntArrayRef size, ::std::optional<bool> check_pinning) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(ccol_indices, cur_level) && !isBatchedAtLevel(row_indices, cur_level) && !isBatchedAtLevel(values, cur_level)) {
    return at::_ops::_validate_sparse_csc_tensor_args::call(ccol_indices, row_indices, values, size, check_pinning);
  }
  auto [ccol_indices_value, ccol_indices_bdim] = unwrapTensorAtLevel(ccol_indices, cur_level);
  auto [row_indices_value, row_indices_bdim] = unwrapTensorAtLevel(row_indices, cur_level);
  auto [values_value, values_bdim] = unwrapTensorAtLevel(values, cur_level);
  batch_rule(ccol_indices_value, ccol_indices_bdim, row_indices_value, row_indices_bdim, values_value, values_bdim, size, check_pinning);
}
// vmap plumbing for aten::_validate_sparse_bsr_tensor_args (void).
template <typename batch_rule_t, batch_rule_t batch_rule>
void _validate_sparse_bsr_tensor_args_generated_plumbing(const at::Tensor & crow_indices, const at::Tensor & col_indices, const at::Tensor & values, at::IntArrayRef size, ::std::optional<bool> check_pinning) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(crow_indices, cur_level) && !isBatchedAtLevel(col_indices, cur_level) && !isBatchedAtLevel(values, cur_level)) {
    return at::_ops::_validate_sparse_bsr_tensor_args::call(crow_indices, col_indices, values, size, check_pinning);
  }
  auto [crow_indices_value, crow_indices_bdim] = unwrapTensorAtLevel(crow_indices, cur_level);
  auto [col_indices_value, col_indices_bdim] = unwrapTensorAtLevel(col_indices, cur_level);
  auto [values_value, values_bdim] = unwrapTensorAtLevel(values, cur_level);
  batch_rule(crow_indices_value, crow_indices_bdim, col_indices_value, col_indices_bdim, values_value, values_bdim, size, check_pinning);
}
// vmap plumbing for aten::_validate_sparse_bsc_tensor_args (void).
template <typename batch_rule_t, batch_rule_t batch_rule>
void _validate_sparse_bsc_tensor_args_generated_plumbing(const at::Tensor & ccol_indices, const at::Tensor & row_indices, const at::Tensor & values, at::IntArrayRef size, ::std::optional<bool> check_pinning) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(ccol_indices, cur_level) && !isBatchedAtLevel(row_indices, cur_level) && !isBatchedAtLevel(values, cur_level)) {
    return at::_ops::_validate_sparse_bsc_tensor_args::call(ccol_indices, row_indices, values, size, check_pinning);
  }
  auto [ccol_indices_value, ccol_indices_bdim] = unwrapTensorAtLevel(ccol_indices, cur_level);
  auto [row_indices_value, row_indices_bdim] = unwrapTensorAtLevel(row_indices, cur_level);
  auto [values_value, values_bdim] = unwrapTensorAtLevel(values, cur_level);
  batch_rule(ccol_indices_value, ccol_indices_bdim, row_indices_value, row_indices_bdim, values_value, values_bdim, size, check_pinning);
}
// vmap plumbing for aten::_sparse_coo_tensor_with_dims_and_tensors: only the two
// tensor args (indices, values) participate in batching; scalar/size args pass through.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor _sparse_coo_tensor_with_dims_and_tensors_generated_plumbing(int64_t sparse_dim, int64_t dense_dim, c10::SymIntArrayRef size, const at::Tensor & indices, const at::Tensor & values, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory, ::std::optional<bool> is_coalesced) {
  // Exclude FuncTorchBatched so the direct redispatch below bypasses this kernel.
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  // Fast path: no batched inputs at this level.
  if (!isBatchedAtLevel(indices, cur_level) && !isBatchedAtLevel(values, cur_level)) {
    return at::_ops::_sparse_coo_tensor_with_dims_and_tensors::call(sparse_dim, dense_dim, size, indices, values, dtype, layout, device, pin_memory, is_coalesced);
  }
  auto [indices_value, indices_bdim] = unwrapTensorAtLevel(indices, cur_level);
  auto [values_value, values_bdim] = unwrapTensorAtLevel(values, cur_level);
  auto results = batch_rule(sparse_dim, dense_dim, size, indices_value, indices_bdim, values_value, values_bdim, dtype, layout, device, pin_memory, is_coalesced);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// vmap plumbing for the in-place aten::sparse_resize_: the batch rule mutates the
// unwrapped value; the original `self` reference is returned, as in-place semantics
// require (note the "gen_vmap_inplace_plumbing" tag).
template <typename batch_rule_t, batch_rule_t batch_rule>
const at::Tensor & sparse_resize__generated_plumbing(const at::Tensor & self, at::IntArrayRef size, int64_t sparse_dim, int64_t dense_dim) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  // Fast path: not batched at this level, redispatch directly.
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::sparse_resize_::call(self, size, sparse_dim, dense_dim);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  // In-place: result of batch_rule is discarded; self was modified through self_value.
  batch_rule(self_value, self_bdim, size, sparse_dim, dense_dim);
  return self;
}
// vmap plumbing for the in-place aten::sparse_resize_and_clear_ (same shape as above).
template <typename batch_rule_t, batch_rule_t batch_rule>
const at::Tensor & sparse_resize_and_clear__generated_plumbing(const at::Tensor & self, at::IntArrayRef size, int64_t sparse_dim, int64_t dense_dim) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::sparse_resize_and_clear_::call(self, size, sparse_dim, dense_dim);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  batch_rule(self_value, self_bdim, size, sparse_dim, dense_dim);
  return self;
}
// vmap plumbing for aten::sparse_mask: unwrap self/mask if batched, run the batch
// rule, re-wrap the result at the current vmap level.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor sparse_mask_generated_plumbing(const at::Tensor & self, const at::Tensor & mask) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  // Fast path: neither input is batched at this level.
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(mask, cur_level)) {
    return at::_ops::sparse_mask::call(self, mask);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto [mask_value, mask_bdim] = unwrapTensorAtLevel(mask, cur_level);
  auto results = batch_rule(self_value, self_bdim, mask_value, mask_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// vmap plumbing for aten::_sparse_mask_projection (extra bool passed through unchanged).
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor _sparse_mask_projection_generated_plumbing(const at::Tensor & self, const at::Tensor & mask, bool accumulate_matches) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(mask, cur_level)) {
    return at::_ops::_sparse_mask_projection::call(self, mask, accumulate_matches);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto [mask_value, mask_bdim] = unwrapTensorAtLevel(mask, cur_level);
  auto results = batch_rule(self_value, self_bdim, mask_value, mask_bdim, accumulate_matches);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// vmap plumbing for aten::_to_cpu: takes a TensorList and returns a vector of
// tensors. The list is forwarded to the batch rule as-is (no per-tensor unwrap
// here — the rule handles unwrapping); outputs are re-wrapped via makeBatchedVector.
template <typename batch_rule_t, batch_rule_t batch_rule>
::std::vector<at::Tensor> _to_cpu_generated_plumbing(at::TensorList tensors) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  // Fast path: nothing in the list is batched at this level.
  if (!isBatchedAtLevel(tensors, cur_level)) {
    return at::_ops::_to_cpu::call(tensors);
  }

  auto results = batch_rule(tensors);
  return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
}
// vmap plumbing for aten::to_dense: standard unary pattern — unwrap self, run the
// batch rule with the non-tensor args, re-wrap the result.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor to_dense_generated_plumbing(const at::Tensor & self, ::std::optional<at::ScalarType> dtype, ::std::optional<bool> masked_grad) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  // Fast path: self is not batched at this level.
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::to_dense::call(self, dtype, masked_grad);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, dtype, masked_grad);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// vmap plumbing for aten::_to_dense (internal variant of the above).
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor _to_dense_generated_plumbing(const at::Tensor & self, ::std::optional<at::ScalarType> dtype, ::std::optional<bool> masked_grad) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::_to_dense::call(self, dtype, masked_grad);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, dtype, masked_grad);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// vmap plumbing for aten::to_dense_backward: binary pattern over (grad, input).
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor to_dense_backward_generated_plumbing(const at::Tensor & grad, const at::Tensor & input, ::std::optional<bool> masked_grad) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(grad, cur_level) && !isBatchedAtLevel(input, cur_level)) {
    return at::_ops::to_dense_backward::call(grad, input, masked_grad);
  }
  auto [grad_value, grad_bdim] = unwrapTensorAtLevel(grad, cur_level);
  auto [input_value, input_bdim] = unwrapTensorAtLevel(input, cur_level);
  auto results = batch_rule(grad_value, grad_bdim, input_value, input_bdim, masked_grad);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// vmap plumbing for aten::coalesce: unary pattern — unwrap self, run batch rule,
// re-wrap.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor coalesce_generated_plumbing(const at::Tensor & self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  // Fast path: self is not batched at this level.
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::coalesce::call(self);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// vmap plumbing for aten::_coalesce (internal variant).
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor _coalesce_generated_plumbing(const at::Tensor & self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::_coalesce::call(self);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// vmap plumbing for aten::_indices (unary accessor).
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor _indices_generated_plumbing(const at::Tensor & self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::_indices::call(self);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// vmap plumbing for aten::_values (unary accessor).
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor _values_generated_plumbing(const at::Tensor & self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::_values::call(self);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// vmap plumbing for the in-place aten::_coalesced_: batch rule mutates the
// unwrapped value; the original self reference is returned.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor & _coalesced__generated_plumbing(at::Tensor & self, bool coalesced) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::_coalesced_::call(self, coalesced);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  batch_rule(self_value, self_bdim, coalesced);
  return self;
}
// vmap plumbing for aten::indices (unary accessor: unwrap, batch rule, re-wrap).
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor indices_generated_plumbing(const at::Tensor & self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  // Fast path: self is not batched at this level.
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::indices::call(self);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// vmap plumbing for aten::values (unary accessor).
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor values_generated_plumbing(const at::Tensor & self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::values::call(self);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// vmap plumbing for aten::crow_indices (unary accessor).
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor crow_indices_generated_plumbing(const at::Tensor & self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::crow_indices::call(self);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// vmap plumbing for aten::col_indices (unary accessor).
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor col_indices_generated_plumbing(const at::Tensor & self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::col_indices::call(self);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// vmap plumbing for aten::ccol_indices (unary accessor).
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor ccol_indices_generated_plumbing(const at::Tensor & self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::ccol_indices::call(self);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// vmap plumbing for aten::row_indices (unary accessor).
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor row_indices_generated_plumbing(const at::Tensor & self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::row_indices::call(self);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// vmap plumbing for aten::hspmm: binary pattern over (mat1, mat2).
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor hspmm_generated_plumbing(const at::Tensor & mat1, const at::Tensor & mat2) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  // Fast path: neither operand is batched at this level.
  if (!isBatchedAtLevel(mat1, cur_level) && !isBatchedAtLevel(mat2, cur_level)) {
    return at::_ops::hspmm::call(mat1, mat2);
  }
  auto [mat1_value, mat1_bdim] = unwrapTensorAtLevel(mat1, cur_level);
  auto [mat2_value, mat2_bdim] = unwrapTensorAtLevel(mat2, cur_level);
  auto results = batch_rule(mat1_value, mat1_bdim, mat2_value, mat2_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// vmap plumbing for the in-place aten::copy_sparse_to_sparse_: batch rule mutates
// the unwrapped self; the original reference is returned.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor & copy_sparse_to_sparse__generated_plumbing(at::Tensor & self, const at::Tensor & src, bool non_blocking) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(src, cur_level)) {
    return at::_ops::copy_sparse_to_sparse_::call(self, src, non_blocking);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto [src_value, src_bdim] = unwrapTensorAtLevel(src, cur_level);
  batch_rule(self_value, self_bdim, src_value, src_bdim, non_blocking);
  return self;
}
// vmap plumbing for aten::unbind.int: returns a vector of tensors, so the outputs
// are re-wrapped with makeBatchedVector rather than makeBatched.
template <typename batch_rule_t, batch_rule_t batch_rule>
::std::vector<at::Tensor> unbind_int_generated_plumbing(const at::Tensor & self, int64_t dim) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  // Fast path: self is not batched at this level.
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::unbind_int::call(self, dim);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, dim);
  return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
}
// vmap plumbing for aten::unbind.Dimname (dim given by name instead of index).
template <typename batch_rule_t, batch_rule_t batch_rule>
::std::vector<at::Tensor> unbind_Dimname_generated_plumbing(const at::Tensor & self, at::Dimname dim) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::unbind_Dimname::call(self, dim);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, dim);
  return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
}
// vmap plumbing for aten::to_sparse.sparse_dim: unwrap `self` at the current
// functorch level, apply `batch_rule`, re-wrap the single tensor result.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor to_sparse_sparse_dim_generated_plumbing(const at::Tensor & self, int64_t sparse_dim) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    // Fast path: not batched at this level.
    return at::_ops::to_sparse_sparse_dim::call(self, sparse_dim);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, sparse_dim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// vmap plumbing for aten::_to_sparse.sparse_dim (internal variant of
// to_sparse.sparse_dim); identical routing logic.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor _to_sparse_sparse_dim_generated_plumbing(const at::Tensor & self, int64_t sparse_dim) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    // Fast path: not batched at this level.
    return at::_ops::_to_sparse_sparse_dim::call(self, sparse_dim);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, sparse_dim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// vmap plumbing for aten::to_sparse: non-tensor args (layout/blocksize/
// dense_dim) are forwarded to `batch_rule` unchanged.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor to_sparse_generated_plumbing(const at::Tensor & self, ::std::optional<at::Layout> layout, at::OptionalIntArrayRef blocksize, ::std::optional<int64_t> dense_dim) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    // Fast path: not batched at this level.
    return at::_ops::to_sparse::call(self, layout, blocksize, dense_dim);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, layout, blocksize, dense_dim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// vmap plumbing for aten::_to_sparse (internal variant of to_sparse);
// identical routing logic.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor _to_sparse_generated_plumbing(const at::Tensor & self, ::std::optional<at::Layout> layout, at::OptionalIntArrayRef blocksize, ::std::optional<int64_t> dense_dim) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    // Fast path: not batched at this level.
    return at::_ops::_to_sparse::call(self, layout, blocksize, dense_dim);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, layout, blocksize, dense_dim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// vmap plumbing for aten::to_sparse_csr.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor to_sparse_csr_generated_plumbing(const at::Tensor & self, ::std::optional<int64_t> dense_dim) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    // Fast path: not batched at this level.
    return at::_ops::to_sparse_csr::call(self, dense_dim);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, dense_dim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// vmap plumbing for aten::_to_sparse_csr (internal variant of to_sparse_csr).
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor _to_sparse_csr_generated_plumbing(const at::Tensor & self, ::std::optional<int64_t> dense_dim) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    // Fast path: not batched at this level.
    return at::_ops::_to_sparse_csr::call(self, dense_dim);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, dense_dim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// vmap plumbing for aten::to_sparse_csc.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor to_sparse_csc_generated_plumbing(const at::Tensor & self, ::std::optional<int64_t> dense_dim) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    // Fast path: not batched at this level.
    return at::_ops::to_sparse_csc::call(self, dense_dim);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, dense_dim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// vmap plumbing for aten::_to_sparse_csc (internal variant of to_sparse_csc).
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor _to_sparse_csc_generated_plumbing(const at::Tensor & self, ::std::optional<int64_t> dense_dim) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    // Fast path: not batched at this level.
    return at::_ops::_to_sparse_csc::call(self, dense_dim);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, dense_dim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// vmap plumbing for aten::to_sparse_bsr.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor to_sparse_bsr_generated_plumbing(const at::Tensor & self, at::IntArrayRef blocksize, ::std::optional<int64_t> dense_dim) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    // Fast path: not batched at this level.
    return at::_ops::to_sparse_bsr::call(self, blocksize, dense_dim);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, blocksize, dense_dim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// vmap plumbing for aten::_to_sparse_bsr (internal variant of to_sparse_bsr).
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor _to_sparse_bsr_generated_plumbing(const at::Tensor & self, at::IntArrayRef blocksize, ::std::optional<int64_t> dense_dim) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    // Fast path: not batched at this level.
    return at::_ops::_to_sparse_bsr::call(self, blocksize, dense_dim);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, blocksize, dense_dim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// vmap plumbing for aten::to_sparse_bsc.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor to_sparse_bsc_generated_plumbing(const at::Tensor & self, at::IntArrayRef blocksize, ::std::optional<int64_t> dense_dim) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    // Fast path: not batched at this level.
    return at::_ops::to_sparse_bsc::call(self, blocksize, dense_dim);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, blocksize, dense_dim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// vmap plumbing for aten::_to_sparse_bsc (internal variant of to_sparse_bsc).
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor _to_sparse_bsc_generated_plumbing(const at::Tensor & self, at::IntArrayRef blocksize, ::std::optional<int64_t> dense_dim) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    // Fast path: not batched at this level.
    return at::_ops::_to_sparse_bsc::call(self, blocksize, dense_dim);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, blocksize, dense_dim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// vmap plumbing for aten::_to_sparse_semi_structured: the batch rule yields
// two (tensor, bdim) pairs; each is re-wrapped into the returned tuple.
template <typename batch_rule_t, batch_rule_t batch_rule>
::std::tuple<at::Tensor,at::Tensor> _to_sparse_semi_structured_generated_plumbing(const at::Tensor & dense) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(dense, cur_level)) {
    // Fast path: not batched at this level.
    return at::_ops::_to_sparse_semi_structured::call(dense);
  }
  auto [dense_value, dense_bdim] = unwrapTensorAtLevel(dense, cur_level);
  auto results = batch_rule(dense_value, dense_bdim);
  return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
}
// vmap plumbing for aten::to_mkldnn.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor to_mkldnn_generated_plumbing(const at::Tensor & self, ::std::optional<at::ScalarType> dtype) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    // Fast path: not batched at this level.
    return at::_ops::to_mkldnn::call(self, dtype);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, dtype);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// vmap plumbing for aten::mkldnn_reorder_conv2d_weight; the symbolic-int
// convolution parameters are passed through to `batch_rule` untouched.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor mkldnn_reorder_conv2d_weight_generated_plumbing(const at::Tensor & self, c10::SymIntArrayRef padding, c10::SymIntArrayRef stride, c10::SymIntArrayRef dilation, c10::SymInt groups, at::OptionalSymIntArrayRef input_size) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    // Fast path: not batched at this level.
    return at::_ops::mkldnn_reorder_conv2d_weight::call(self, padding, stride, dilation, groups, input_size);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, padding, stride, dilation, groups, input_size);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// vmap plumbing for aten::mkldnn_reorder_conv3d_weight (3-D counterpart of
// mkldnn_reorder_conv2d_weight).
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor mkldnn_reorder_conv3d_weight_generated_plumbing(const at::Tensor & self, c10::SymIntArrayRef padding, c10::SymIntArrayRef stride, c10::SymIntArrayRef dilation, c10::SymInt groups, at::OptionalSymIntArrayRef input_size) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    // Fast path: not batched at this level.
    return at::_ops::mkldnn_reorder_conv3d_weight::call(self, padding, stride, dilation, groups, input_size);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, padding, stride, dilation, groups, input_size);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// vmap plumbing for aten::to_mkldnn_backward: takes two tensor args; the fast
// path is taken only when NEITHER is batched at the current level.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor to_mkldnn_backward_generated_plumbing(const at::Tensor & grad, const at::Tensor & input) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(grad, cur_level) && !isBatchedAtLevel(input, cur_level)) {
    return at::_ops::to_mkldnn_backward::call(grad, input);
  }
  auto [grad_value, grad_bdim] = unwrapTensorAtLevel(grad, cur_level);
  auto [input_value, input_bdim] = unwrapTensorAtLevel(input, cur_level);
  auto results = batch_rule(grad_value, grad_bdim, input_value, input_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// vmap plumbing for aten::quantize_per_tensor_dynamic.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor quantize_per_tensor_dynamic_generated_plumbing(const at::Tensor & self, at::ScalarType dtype, bool reduce_range) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    // Fast path: not batched at this level.
    return at::_ops::quantize_per_tensor_dynamic::call(self, dtype, reduce_range);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, dtype, reduce_range);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// vmap plumbing for aten::quantize_per_tensor (scalar scale / zero_point).
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor quantize_per_tensor_generated_plumbing(const at::Tensor & self, double scale, int64_t zero_point, at::ScalarType dtype) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    // Fast path: not batched at this level.
    return at::_ops::quantize_per_tensor::call(self, scale, zero_point, dtype);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, scale, zero_point, dtype);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// vmap plumbing for aten::quantize_per_tensor.tensor_qparams: scale and
// zero_point are tensors here, so all three tensor args are unwrapped.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor quantize_per_tensor_tensor_qparams_generated_plumbing(const at::Tensor & self, const at::Tensor & scale, const at::Tensor & zero_point, at::ScalarType dtype) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(scale, cur_level) && !isBatchedAtLevel(zero_point, cur_level)) {
    // Fast path: none of the tensor arguments are batched at this level.
    return at::_ops::quantize_per_tensor_tensor_qparams::call(self, scale, zero_point, dtype);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto [scale_value, scale_bdim] = unwrapTensorAtLevel(scale, cur_level);
  auto [zero_point_value, zero_point_bdim] = unwrapTensorAtLevel(zero_point, cur_level);
  auto results = batch_rule(self_value, self_bdim, scale_value, scale_bdim, zero_point_value, zero_point_bdim, dtype);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// vmap plumbing for aten::quantize_per_tensor.tensors: `tensors` is a
// TensorList and is handed to `batch_rule` as-is (the rule unwraps the list
// itself); only the scales/zero_points tensors are unwrapped here.
template <typename batch_rule_t, batch_rule_t batch_rule>
::std::vector<at::Tensor> quantize_per_tensor_tensors_generated_plumbing(at::TensorList tensors, const at::Tensor & scales, const at::Tensor & zero_points, at::ScalarType dtype) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(tensors, cur_level) && !isBatchedAtLevel(scales, cur_level) && !isBatchedAtLevel(zero_points, cur_level)) {
    // Fast path: nothing batched at this level.
    return at::_ops::quantize_per_tensor_tensors::call(tensors, scales, zero_points, dtype);
  }
  auto [scales_value, scales_bdim] = unwrapTensorAtLevel(scales, cur_level);
  auto [zero_points_value, zero_points_bdim] = unwrapTensorAtLevel(zero_points, cur_level);
  auto results = batch_rule(tensors, scales_value, scales_bdim, zero_points_value, zero_points_bdim, dtype);
  return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
}
// vmap plumbing for aten::quantize_per_channel.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor quantize_per_channel_generated_plumbing(const at::Tensor & self, const at::Tensor & scales, const at::Tensor & zero_points, int64_t axis, at::ScalarType dtype) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(scales, cur_level) && !isBatchedAtLevel(zero_points, cur_level)) {
    // Fast path: none of the tensor arguments are batched at this level.
    return at::_ops::quantize_per_channel::call(self, scales, zero_points, axis, dtype);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto [scales_value, scales_bdim] = unwrapTensorAtLevel(scales, cur_level);
  auto [zero_points_value, zero_points_bdim] = unwrapTensorAtLevel(zero_points, cur_level);
  auto results = batch_rule(self_value, self_bdim, scales_value, scales_bdim, zero_points_value, zero_points_bdim, axis, dtype);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// vmap plumbing for aten::dequantize.self.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor dequantize_self_generated_plumbing(const at::Tensor & self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    // Fast path: not batched at this level.
    return at::_ops::dequantize_self::call(self);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// vmap plumbing for aten::dequantize.tensors: the only argument is a
// TensorList, which is passed to `batch_rule` without unwrapping (the rule
// handles list unwrapping itself).
template <typename batch_rule_t, batch_rule_t batch_rule>
::std::vector<at::Tensor> dequantize_tensors_generated_plumbing(at::TensorList tensors) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(tensors, cur_level)) {
    // Fast path: no element of the list is batched at this level.
    return at::_ops::dequantize_tensors::call(tensors);
  }

  auto results = batch_rule(tensors);
  return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
}
// vmap plumbing for aten::q_per_channel_scales.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor q_per_channel_scales_generated_plumbing(const at::Tensor & self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    // Fast path: not batched at this level.
    return at::_ops::q_per_channel_scales::call(self);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// vmap plumbing for aten::q_per_channel_zero_points.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor q_per_channel_zero_points_generated_plumbing(const at::Tensor & self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    // Fast path: not batched at this level.
    return at::_ops::q_per_channel_zero_points::call(self);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// vmap plumbing for aten::int_repr.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor int_repr_generated_plumbing(const at::Tensor & self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    // Fast path: not batched at this level.
    return at::_ops::int_repr::call(self);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// vmap plumbing for aten::_make_per_tensor_quantized_tensor.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor _make_per_tensor_quantized_tensor_generated_plumbing(const at::Tensor & self, double scale, int64_t zero_point) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    // Fast path: not batched at this level.
    return at::_ops::_make_per_tensor_quantized_tensor::call(self, scale, zero_point);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, scale, zero_point);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// vmap plumbing for aten::_make_per_channel_quantized_tensor.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor _make_per_channel_quantized_tensor_generated_plumbing(const at::Tensor & self, const at::Tensor & scale, const at::Tensor & zero_point, int64_t axis) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(scale, cur_level) && !isBatchedAtLevel(zero_point, cur_level)) {
    // Fast path: none of the tensor arguments are batched at this level.
    return at::_ops::_make_per_channel_quantized_tensor::call(self, scale, zero_point, axis);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto [scale_value, scale_bdim] = unwrapTensorAtLevel(scale, cur_level);
  auto [zero_point_value, zero_point_bdim] = unwrapTensorAtLevel(zero_point, cur_level);
  auto results = batch_rule(self_value, self_bdim, scale_value, scale_bdim, zero_point_value, zero_point_bdim, axis);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// vmap plumbing for aten::fake_quantize_per_tensor_affine (scalar qparams).
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor fake_quantize_per_tensor_affine_generated_plumbing(const at::Tensor & self, double scale, int64_t zero_point, int64_t quant_min, int64_t quant_max) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    // Fast path: not batched at this level.
    return at::_ops::fake_quantize_per_tensor_affine::call(self, scale, zero_point, quant_min, quant_max);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, scale, zero_point, quant_min, quant_max);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// vmap plumbing for aten::fake_quantize_per_tensor_affine.tensor_qparams:
// scale and zero_point are tensors, so all three tensor args are unwrapped.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor fake_quantize_per_tensor_affine_tensor_qparams_generated_plumbing(const at::Tensor & self, const at::Tensor & scale, const at::Tensor & zero_point, int64_t quant_min, int64_t quant_max) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(scale, cur_level) && !isBatchedAtLevel(zero_point, cur_level)) {
    // Fast path: none of the tensor arguments are batched at this level.
    return at::_ops::fake_quantize_per_tensor_affine_tensor_qparams::call(self, scale, zero_point, quant_min, quant_max);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto [scale_value, scale_bdim] = unwrapTensorAtLevel(scale, cur_level);
  auto [zero_point_value, zero_point_bdim] = unwrapTensorAtLevel(zero_point, cur_level);
  auto results = batch_rule(self_value, self_bdim, scale_value, scale_bdim, zero_point_value, zero_point_bdim, quant_min, quant_max);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// vmap plumbing for aten::fake_quantize_per_tensor_affine_cachemask: the
// batch rule yields two (tensor, bdim) pairs (output, mask), each re-wrapped.
template <typename batch_rule_t, batch_rule_t batch_rule>
::std::tuple<at::Tensor,at::Tensor> fake_quantize_per_tensor_affine_cachemask_generated_plumbing(const at::Tensor & self, double scale, int64_t zero_point, int64_t quant_min, int64_t quant_max) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    // Fast path: not batched at this level.
    return at::_ops::fake_quantize_per_tensor_affine_cachemask::call(self, scale, zero_point, quant_min, quant_max);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, scale, zero_point, quant_min, quant_max);
  return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
}
// vmap plumbing for aten::_fake_quantize_per_tensor_affine_cachemask_tensor_qparams:
// four tensor arguments; fast path only when none is batched at this level.
template <typename batch_rule_t, batch_rule_t batch_rule>
::std::tuple<at::Tensor,at::Tensor> _fake_quantize_per_tensor_affine_cachemask_tensor_qparams_generated_plumbing(const at::Tensor & self, const at::Tensor & scale, const at::Tensor & zero_point, const at::Tensor & fake_quant_enabled, int64_t quant_min, int64_t quant_max) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(scale, cur_level) && !isBatchedAtLevel(zero_point, cur_level) && !isBatchedAtLevel(fake_quant_enabled, cur_level)) {
    return at::_ops::_fake_quantize_per_tensor_affine_cachemask_tensor_qparams::call(self, scale, zero_point, fake_quant_enabled, quant_min, quant_max);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto [scale_value, scale_bdim] = unwrapTensorAtLevel(scale, cur_level);
  auto [zero_point_value, zero_point_bdim] = unwrapTensorAtLevel(zero_point, cur_level);
  auto [fake_quant_enabled_value, fake_quant_enabled_bdim] = unwrapTensorAtLevel(fake_quant_enabled, cur_level);
  auto results = batch_rule(self_value, self_bdim, scale_value, scale_bdim, zero_point_value, zero_point_bdim, fake_quant_enabled_value, fake_quant_enabled_bdim, quant_min, quant_max);
  // Two (tensor, bdim) pairs come back; re-wrap each at this level.
  return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
}
// vmap plumbing for aten::fake_quantize_per_tensor_affine_cachemask_backward.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor fake_quantize_per_tensor_affine_cachemask_backward_generated_plumbing(const at::Tensor & grad, const at::Tensor & mask) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(grad, cur_level) && !isBatchedAtLevel(mask, cur_level)) {
    // Fast path: neither tensor is batched at this level.
    return at::_ops::fake_quantize_per_tensor_affine_cachemask_backward::call(grad, mask);
  }
  auto [grad_value, grad_bdim] = unwrapTensorAtLevel(grad, cur_level);
  auto [mask_value, mask_bdim] = unwrapTensorAtLevel(mask, cur_level);
  auto results = batch_rule(grad_value, grad_bdim, mask_value, mask_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// vmap plumbing for aten::_fake_quantize_learnable_per_tensor_affine.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor _fake_quantize_learnable_per_tensor_affine_generated_plumbing(const at::Tensor & self, const at::Tensor & scale, const at::Tensor & zero_point, int64_t quant_min, int64_t quant_max, double grad_factor) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(scale, cur_level) && !isBatchedAtLevel(zero_point, cur_level)) {
    // Fast path: none of the tensor arguments are batched at this level.
    return at::_ops::_fake_quantize_learnable_per_tensor_affine::call(self, scale, zero_point, quant_min, quant_max, grad_factor);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto [scale_value, scale_bdim] = unwrapTensorAtLevel(scale, cur_level);
  auto [zero_point_value, zero_point_bdim] = unwrapTensorAtLevel(zero_point, cur_level);
  auto results = batch_rule(self_value, self_bdim, scale_value, scale_bdim, zero_point_value, zero_point_bdim, quant_min, quant_max, grad_factor);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
13724 template <typename batch_rule_t, batch_rule_t batch_rule>
13725 ::std::tuple<at::Tensor,at::Tensor,at::Tensor> _fake_quantize_learnable_per_tensor_affine_backward_generated_plumbing(const at::Tensor & grad, const at::Tensor & self, const at::Tensor & scale, const at::Tensor & zero_point, int64_t quant_min, int64_t quant_max, double grad_factor) {
13726   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
13727   auto maybe_layer = maybeCurrentDynamicLayer();
13728   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
13729   int64_t cur_level = maybe_layer->layerId();
13730   if (!isBatchedAtLevel(grad, cur_level) && !isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(scale, cur_level) && !isBatchedAtLevel(zero_point, cur_level)) {
13731     return at::_ops::_fake_quantize_learnable_per_tensor_affine_backward::call(grad, self, scale, zero_point, quant_min, quant_max, grad_factor);
13732   }
13733   auto [grad_value, grad_bdim] = unwrapTensorAtLevel(grad, cur_level);
13734   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
13735   auto [scale_value, scale_bdim] = unwrapTensorAtLevel(scale, cur_level);
13736   auto [zero_point_value, zero_point_bdim] = unwrapTensorAtLevel(zero_point, cur_level);
13737   auto results = batch_rule(grad_value, grad_bdim, self_value, self_bdim, scale_value, scale_bdim, zero_point_value, zero_point_bdim, quant_min, quant_max, grad_factor);
13738   return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level));
13739 }
13740 template <typename batch_rule_t, batch_rule_t batch_rule>
13741 at::Tensor fake_quantize_per_channel_affine_generated_plumbing(const at::Tensor & self, const at::Tensor & scale, const at::Tensor & zero_point, int64_t axis, int64_t quant_min, int64_t quant_max) {
13742   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
13743   auto maybe_layer = maybeCurrentDynamicLayer();
13744   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
13745   int64_t cur_level = maybe_layer->layerId();
13746   if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(scale, cur_level) && !isBatchedAtLevel(zero_point, cur_level)) {
13747     return at::_ops::fake_quantize_per_channel_affine::call(self, scale, zero_point, axis, quant_min, quant_max);
13748   }
13749   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
13750   auto [scale_value, scale_bdim] = unwrapTensorAtLevel(scale, cur_level);
13751   auto [zero_point_value, zero_point_bdim] = unwrapTensorAtLevel(zero_point, cur_level);
13752   auto results = batch_rule(self_value, self_bdim, scale_value, scale_bdim, zero_point_value, zero_point_bdim, axis, quant_min, quant_max);
13753   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
13754 }
13755 template <typename batch_rule_t, batch_rule_t batch_rule>
13756 ::std::tuple<at::Tensor,at::Tensor> fake_quantize_per_channel_affine_cachemask_generated_plumbing(const at::Tensor & self, const at::Tensor & scale, const at::Tensor & zero_point, int64_t axis, int64_t quant_min, int64_t quant_max) {
13757   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
13758   auto maybe_layer = maybeCurrentDynamicLayer();
13759   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
13760   int64_t cur_level = maybe_layer->layerId();
13761   if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(scale, cur_level) && !isBatchedAtLevel(zero_point, cur_level)) {
13762     return at::_ops::fake_quantize_per_channel_affine_cachemask::call(self, scale, zero_point, axis, quant_min, quant_max);
13763   }
13764   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
13765   auto [scale_value, scale_bdim] = unwrapTensorAtLevel(scale, cur_level);
13766   auto [zero_point_value, zero_point_bdim] = unwrapTensorAtLevel(zero_point, cur_level);
13767   auto results = batch_rule(self_value, self_bdim, scale_value, scale_bdim, zero_point_value, zero_point_bdim, axis, quant_min, quant_max);
13768   return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
13769 }
13770 template <typename batch_rule_t, batch_rule_t batch_rule>
13771 at::Tensor fake_quantize_per_channel_affine_cachemask_backward_generated_plumbing(const at::Tensor & grad, const at::Tensor & mask) {
13772   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
13773   auto maybe_layer = maybeCurrentDynamicLayer();
13774   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
13775   int64_t cur_level = maybe_layer->layerId();
13776   if (!isBatchedAtLevel(grad, cur_level) && !isBatchedAtLevel(mask, cur_level)) {
13777     return at::_ops::fake_quantize_per_channel_affine_cachemask_backward::call(grad, mask);
13778   }
13779   auto [grad_value, grad_bdim] = unwrapTensorAtLevel(grad, cur_level);
13780   auto [mask_value, mask_bdim] = unwrapTensorAtLevel(mask, cur_level);
13781   auto results = batch_rule(grad_value, grad_bdim, mask_value, mask_bdim);
13782   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
13783 }
13784 template <typename batch_rule_t, batch_rule_t batch_rule>
13785 at::Tensor _fake_quantize_learnable_per_channel_affine_generated_plumbing(const at::Tensor & self, const at::Tensor & scale, const at::Tensor & zero_point, int64_t axis, int64_t quant_min, int64_t quant_max, double grad_factor) {
13786   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
13787   auto maybe_layer = maybeCurrentDynamicLayer();
13788   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
13789   int64_t cur_level = maybe_layer->layerId();
13790   if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(scale, cur_level) && !isBatchedAtLevel(zero_point, cur_level)) {
13791     return at::_ops::_fake_quantize_learnable_per_channel_affine::call(self, scale, zero_point, axis, quant_min, quant_max, grad_factor);
13792   }
13793   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
13794   auto [scale_value, scale_bdim] = unwrapTensorAtLevel(scale, cur_level);
13795   auto [zero_point_value, zero_point_bdim] = unwrapTensorAtLevel(zero_point, cur_level);
13796   auto results = batch_rule(self_value, self_bdim, scale_value, scale_bdim, zero_point_value, zero_point_bdim, axis, quant_min, quant_max, grad_factor);
13797   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
13798 }
13799 template <typename batch_rule_t, batch_rule_t batch_rule>
13800 ::std::tuple<at::Tensor,at::Tensor,at::Tensor> _fake_quantize_learnable_per_channel_affine_backward_generated_plumbing(const at::Tensor & grad, const at::Tensor & self, const at::Tensor & scale, const at::Tensor & zero_point, int64_t axis, int64_t quant_min, int64_t quant_max, double grad_factor) {
13801   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
13802   auto maybe_layer = maybeCurrentDynamicLayer();
13803   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
13804   int64_t cur_level = maybe_layer->layerId();
13805   if (!isBatchedAtLevel(grad, cur_level) && !isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(scale, cur_level) && !isBatchedAtLevel(zero_point, cur_level)) {
13806     return at::_ops::_fake_quantize_learnable_per_channel_affine_backward::call(grad, self, scale, zero_point, axis, quant_min, quant_max, grad_factor);
13807   }
13808   auto [grad_value, grad_bdim] = unwrapTensorAtLevel(grad, cur_level);
13809   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
13810   auto [scale_value, scale_bdim] = unwrapTensorAtLevel(scale, cur_level);
13811   auto [zero_point_value, zero_point_bdim] = unwrapTensorAtLevel(zero_point, cur_level);
13812   auto results = batch_rule(grad_value, grad_bdim, self_value, self_bdim, scale_value, scale_bdim, zero_point_value, zero_point_bdim, axis, quant_min, quant_max, grad_factor);
13813   return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level));
13814 }
13815 template <typename batch_rule_t, batch_rule_t batch_rule>
13816 at::Tensor _saturate_weight_to_fp16_generated_plumbing(const at::Tensor & weight) {
13817   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
13818   auto maybe_layer = maybeCurrentDynamicLayer();
13819   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
13820   int64_t cur_level = maybe_layer->layerId();
13821   if (!isBatchedAtLevel(weight, cur_level)) {
13822     return at::_ops::_saturate_weight_to_fp16::call(weight);
13823   }
13824   auto [weight_value, weight_bdim] = unwrapTensorAtLevel(weight, cur_level);
13825   auto results = batch_rule(weight_value, weight_bdim);
13826   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
13827 }
13828 template <typename batch_rule_t, batch_rule_t batch_rule>
13829 ::std::tuple<at::Tensor,at::Tensor> choose_qparams_optimized_generated_plumbing(const at::Tensor & input, int64_t numel, int64_t n_bins, double ratio, int64_t bit_width) {
13830   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
13831   auto maybe_layer = maybeCurrentDynamicLayer();
13832   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
13833   int64_t cur_level = maybe_layer->layerId();
13834   if (!isBatchedAtLevel(input, cur_level)) {
13835     return at::_ops::choose_qparams_optimized::call(input, numel, n_bins, ratio, bit_width);
13836   }
13837   auto [input_value, input_bdim] = unwrapTensorAtLevel(input, cur_level);
13838   auto results = batch_rule(input_value, input_bdim, numel, n_bins, ratio, bit_width);
13839   return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
13840 }
13841 template <typename batch_rule_t, batch_rule_t batch_rule>
13842 at::Tensor _autocast_to_reduced_precision_generated_plumbing(const at::Tensor & self, bool cuda_enabled, bool cpu_enabled, at::ScalarType cuda_dtype, at::ScalarType cpu_dtype) {
13843   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
13844   auto maybe_layer = maybeCurrentDynamicLayer();
13845   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
13846   int64_t cur_level = maybe_layer->layerId();
13847   if (!isBatchedAtLevel(self, cur_level)) {
13848     return at::_ops::_autocast_to_reduced_precision::call(self, cuda_enabled, cpu_enabled, cuda_dtype, cpu_dtype);
13849   }
13850   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
13851   auto results = batch_rule(self_value, self_bdim, cuda_enabled, cpu_enabled, cuda_dtype, cpu_dtype);
13852   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
13853 }
13854 template <typename batch_rule_t, batch_rule_t batch_rule>
13855 at::Tensor _autocast_to_full_precision_generated_plumbing(const at::Tensor & self, bool cuda_enabled, bool cpu_enabled) {
13856   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
13857   auto maybe_layer = maybeCurrentDynamicLayer();
13858   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
13859   int64_t cur_level = maybe_layer->layerId();
13860   if (!isBatchedAtLevel(self, cur_level)) {
13861     return at::_ops::_autocast_to_full_precision::call(self, cuda_enabled, cpu_enabled);
13862   }
13863   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
13864   auto results = batch_rule(self_value, self_bdim, cuda_enabled, cpu_enabled);
13865   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
13866 }
13867 template <typename batch_rule_t, batch_rule_t batch_rule>
13868 at::Tensor _to_copy_generated_plumbing(const at::Tensor & self, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory, bool non_blocking, ::std::optional<at::MemoryFormat> memory_format) {
13869   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
13870   auto maybe_layer = maybeCurrentDynamicLayer();
13871   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
13872   int64_t cur_level = maybe_layer->layerId();
13873   if (!isBatchedAtLevel(self, cur_level)) {
13874     return at::_ops::_to_copy::call(self, dtype, layout, device, pin_memory, non_blocking, memory_format);
13875   }
13876   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
13877   auto results = batch_rule(self_value, self_bdim, dtype, layout, device, pin_memory, non_blocking, memory_format);
13878   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
13879 }
13880 template <typename batch_rule_t, batch_rule_t batch_rule>
13881 at::Tensor to_dtype_layout_generated_plumbing(const at::Tensor & self, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory, bool non_blocking, bool copy, ::std::optional<at::MemoryFormat> memory_format) {
13882   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
13883   auto maybe_layer = maybeCurrentDynamicLayer();
13884   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
13885   int64_t cur_level = maybe_layer->layerId();
13886   if (!isBatchedAtLevel(self, cur_level)) {
13887     return at::_ops::to_dtype_layout::call(self, dtype, layout, device, pin_memory, non_blocking, copy, memory_format);
13888   }
13889   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
13890   auto results = batch_rule(self_value, self_bdim, dtype, layout, device, pin_memory, non_blocking, copy, memory_format);
13891   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
13892 }
13893 template <typename batch_rule_t, batch_rule_t batch_rule>
13894 at::Tensor to_device_generated_plumbing(const at::Tensor & self, at::Device device, at::ScalarType dtype, bool non_blocking, bool copy, ::std::optional<at::MemoryFormat> memory_format) {
13895   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
13896   auto maybe_layer = maybeCurrentDynamicLayer();
13897   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
13898   int64_t cur_level = maybe_layer->layerId();
13899   if (!isBatchedAtLevel(self, cur_level)) {
13900     return at::_ops::to_device::call(self, device, dtype, non_blocking, copy, memory_format);
13901   }
13902   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
13903   auto results = batch_rule(self_value, self_bdim, device, dtype, non_blocking, copy, memory_format);
13904   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
13905 }
13906 template <typename batch_rule_t, batch_rule_t batch_rule>
13907 at::Tensor to_dtype_generated_plumbing(const at::Tensor & self, at::ScalarType dtype, bool non_blocking, bool copy, ::std::optional<at::MemoryFormat> memory_format) {
13908   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
13909   auto maybe_layer = maybeCurrentDynamicLayer();
13910   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
13911   int64_t cur_level = maybe_layer->layerId();
13912   if (!isBatchedAtLevel(self, cur_level)) {
13913     return at::_ops::to_dtype::call(self, dtype, non_blocking, copy, memory_format);
13914   }
13915   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
13916   auto results = batch_rule(self_value, self_bdim, dtype, non_blocking, copy, memory_format);
13917   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
13918 }
13919 template <typename batch_rule_t, batch_rule_t batch_rule>
13920 at::Tensor to_other_generated_plumbing(const at::Tensor & self, const at::Tensor & other, bool non_blocking, bool copy, ::std::optional<at::MemoryFormat> memory_format) {
13921   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
13922   auto maybe_layer = maybeCurrentDynamicLayer();
13923   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
13924   int64_t cur_level = maybe_layer->layerId();
13925   if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
13926     return at::_ops::to_other::call(self, other, non_blocking, copy, memory_format);
13927   }
13928   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
13929   auto [other_value, other_bdim] = unwrapTensorAtLevel(other, cur_level);
13930   auto results = batch_rule(self_value, self_bdim, other_value, other_bdim, non_blocking, copy, memory_format);
13931   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
13932 }
13933 template <typename batch_rule_t, batch_rule_t batch_rule>
13934 ::std::vector<at::Tensor> meshgrid_generated_plumbing(at::TensorList tensors) {
13935   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
13936   auto maybe_layer = maybeCurrentDynamicLayer();
13937   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
13938   int64_t cur_level = maybe_layer->layerId();
13939   if (!isBatchedAtLevel(tensors, cur_level)) {
13940     return at::_ops::meshgrid::call(tensors);
13941   }
13942 
13943   auto results = batch_rule(tensors);
13944   return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
13945 }
13946 template <typename batch_rule_t, batch_rule_t batch_rule>
13947 ::std::vector<at::Tensor> meshgrid_indexing_generated_plumbing(at::TensorList tensors, c10::string_view indexing) {
13948   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
13949   auto maybe_layer = maybeCurrentDynamicLayer();
13950   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
13951   int64_t cur_level = maybe_layer->layerId();
13952   if (!isBatchedAtLevel(tensors, cur_level)) {
13953     return at::_ops::meshgrid_indexing::call(tensors, indexing);
13954   }
13955 
13956   auto results = batch_rule(tensors, indexing);
13957   return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
13958 }
13959 template <typename batch_rule_t, batch_rule_t batch_rule>
13960 at::Tensor cartesian_prod_generated_plumbing(at::TensorList tensors) {
13961   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
13962   auto maybe_layer = maybeCurrentDynamicLayer();
13963   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
13964   int64_t cur_level = maybe_layer->layerId();
13965   if (!isBatchedAtLevel(tensors, cur_level)) {
13966     return at::_ops::cartesian_prod::call(tensors);
13967   }
13968 
13969   auto results = batch_rule(tensors);
13970   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
13971 }
13972 template <typename batch_rule_t, batch_rule_t batch_rule>
13973 at::Tensor combinations_generated_plumbing(const at::Tensor & self, int64_t r, bool with_replacement) {
13974   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
13975   auto maybe_layer = maybeCurrentDynamicLayer();
13976   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
13977   int64_t cur_level = maybe_layer->layerId();
13978   if (!isBatchedAtLevel(self, cur_level)) {
13979     return at::_ops::combinations::call(self, r, with_replacement);
13980   }
13981   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
13982   auto results = batch_rule(self_value, self_bdim, r, with_replacement);
13983   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
13984 }
13985 template <typename batch_rule_t, batch_rule_t batch_rule>
13986 ::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor,at::Tensor,at::Tensor> _lstm_mps_generated_plumbing(const at::Tensor & input, at::TensorList hx, at::TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional, bool batch_first) {
13987   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
13988   auto maybe_layer = maybeCurrentDynamicLayer();
13989   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
13990   int64_t cur_level = maybe_layer->layerId();
13991   if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(hx, cur_level) && !isBatchedAtLevel(params, cur_level)) {
13992     return at::_ops::_lstm_mps::call(input, hx, params, has_biases, num_layers, dropout, train, bidirectional, batch_first);
13993   }
13994   auto [input_value, input_bdim] = unwrapTensorAtLevel(input, cur_level);
13995   auto results = batch_rule(input_value, input_bdim, hx, params, has_biases, num_layers, dropout, train, bidirectional, batch_first);
13996   return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level), makeBatched(std::get<6>(results), std::get<7>(results), cur_level), makeBatched(std::get<8>(results), std::get<9>(results), cur_level), makeBatched(std::get<10>(results), std::get<11>(results), cur_level));
13997 }
13998 template <typename batch_rule_t, batch_rule_t batch_rule>
13999 ::std::tuple<at::Tensor,::std::vector<at::Tensor>,::std::vector<at::Tensor>> lstm_mps_backward_generated_plumbing(const ::std::optional<at::Tensor> & grad_y, const ::std::optional<at::Tensor> & grad_hy, const ::std::optional<at::Tensor> & grad_cy, const at::Tensor & z_state, const at::Tensor & cell_state_fwd, const at::Tensor & input, const at::Tensor & layersOutputs, at::TensorList hx, at::TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional, bool batch_first) {
14000   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
14001   auto maybe_layer = maybeCurrentDynamicLayer();
14002   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
14003   int64_t cur_level = maybe_layer->layerId();
14004   if (!isBatchedAtLevel(grad_y, cur_level) && !isBatchedAtLevel(grad_hy, cur_level) && !isBatchedAtLevel(grad_cy, cur_level) && !isBatchedAtLevel(z_state, cur_level) && !isBatchedAtLevel(cell_state_fwd, cur_level) && !isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(layersOutputs, cur_level) && !isBatchedAtLevel(hx, cur_level) && !isBatchedAtLevel(params, cur_level)) {
14005     return at::_ops::lstm_mps_backward::call(grad_y, grad_hy, grad_cy, z_state, cell_state_fwd, input, layersOutputs, hx, params, has_biases, num_layers, dropout, train, bidirectional, batch_first);
14006   }
14007   auto [z_state_value, z_state_bdim] = unwrapTensorAtLevel(z_state, cur_level);
14008   auto [cell_state_fwd_value, cell_state_fwd_bdim] = unwrapTensorAtLevel(cell_state_fwd, cur_level);
14009   auto [input_value, input_bdim] = unwrapTensorAtLevel(input, cur_level);
14010   auto [layersOutputs_value, layersOutputs_bdim] = unwrapTensorAtLevel(layersOutputs, cur_level);
14011   std::optional<Tensor> grad_y_value;
14012   std::optional<int64_t> grad_y_bdim;
14013   if (grad_y) {
14014       std::tie(grad_y_value, grad_y_bdim) = unwrapTensorAtLevel(grad_y.value(), cur_level);
14015   }
14016   std::optional<Tensor> grad_hy_value;
14017   std::optional<int64_t> grad_hy_bdim;
14018   if (grad_hy) {
14019       std::tie(grad_hy_value, grad_hy_bdim) = unwrapTensorAtLevel(grad_hy.value(), cur_level);
14020   }
14021   std::optional<Tensor> grad_cy_value;
14022   std::optional<int64_t> grad_cy_bdim;
14023   if (grad_cy) {
14024       std::tie(grad_cy_value, grad_cy_bdim) = unwrapTensorAtLevel(grad_cy.value(), cur_level);
14025   }
14026   auto results = batch_rule(grad_y_value, grad_y_bdim, grad_hy_value, grad_hy_bdim, grad_cy_value, grad_cy_bdim, z_state_value, z_state_bdim, cell_state_fwd_value, cell_state_fwd_bdim, input_value, input_bdim, layersOutputs_value, layersOutputs_bdim, hx, params, has_biases, num_layers, dropout, train, bidirectional, batch_first);
14027   return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatchedVector(std::get<2>(results), std::get<3>(results), cur_level), makeBatchedVector(std::get<4>(results), std::get<5>(results), cur_level));
14028 }
14029 template <typename batch_rule_t, batch_rule_t batch_rule>
14030 ::std::tuple<at::Tensor,at::Tensor,at::Tensor> _thnn_fused_lstm_cell_generated_plumbing(const at::Tensor & input_gates, const at::Tensor & hidden_gates, const at::Tensor & cx, const ::std::optional<at::Tensor> & input_bias, const ::std::optional<at::Tensor> & hidden_bias) {
14031   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
14032   auto maybe_layer = maybeCurrentDynamicLayer();
14033   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
14034   int64_t cur_level = maybe_layer->layerId();
14035   if (!isBatchedAtLevel(input_gates, cur_level) && !isBatchedAtLevel(hidden_gates, cur_level) && !isBatchedAtLevel(cx, cur_level) && !isBatchedAtLevel(input_bias, cur_level) && !isBatchedAtLevel(hidden_bias, cur_level)) {
14036     return at::_ops::_thnn_fused_lstm_cell::call(input_gates, hidden_gates, cx, input_bias, hidden_bias);
14037   }
14038   auto [input_gates_value, input_gates_bdim] = unwrapTensorAtLevel(input_gates, cur_level);
14039   auto [hidden_gates_value, hidden_gates_bdim] = unwrapTensorAtLevel(hidden_gates, cur_level);
14040   auto [cx_value, cx_bdim] = unwrapTensorAtLevel(cx, cur_level);
14041   std::optional<Tensor> input_bias_value;
14042   std::optional<int64_t> input_bias_bdim;
14043   if (input_bias) {
14044       std::tie(input_bias_value, input_bias_bdim) = unwrapTensorAtLevel(input_bias.value(), cur_level);
14045   }
14046   std::optional<Tensor> hidden_bias_value;
14047   std::optional<int64_t> hidden_bias_bdim;
14048   if (hidden_bias) {
14049       std::tie(hidden_bias_value, hidden_bias_bdim) = unwrapTensorAtLevel(hidden_bias.value(), cur_level);
14050   }
14051   auto results = batch_rule(input_gates_value, input_gates_bdim, hidden_gates_value, hidden_gates_bdim, cx_value, cx_bdim, input_bias_value, input_bias_bdim, hidden_bias_value, hidden_bias_bdim);
14052   return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level));
14053 }
14054 template <typename batch_rule_t, batch_rule_t batch_rule>
14055 ::std::tuple<at::Tensor,at::Tensor,at::Tensor> _thnn_fused_lstm_cell_backward_impl_generated_plumbing(const ::std::optional<at::Tensor> & grad_hy, const ::std::optional<at::Tensor> & grad_cy, const at::Tensor & cx, const at::Tensor & cy, const at::Tensor & workspace, bool has_bias) {
14056   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
14057   auto maybe_layer = maybeCurrentDynamicLayer();
14058   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
14059   int64_t cur_level = maybe_layer->layerId();
14060   if (!isBatchedAtLevel(grad_hy, cur_level) && !isBatchedAtLevel(grad_cy, cur_level) && !isBatchedAtLevel(cx, cur_level) && !isBatchedAtLevel(cy, cur_level) && !isBatchedAtLevel(workspace, cur_level)) {
14061     return at::_ops::_thnn_fused_lstm_cell_backward_impl::call(grad_hy, grad_cy, cx, cy, workspace, has_bias);
14062   }
14063   auto [cx_value, cx_bdim] = unwrapTensorAtLevel(cx, cur_level);
14064   auto [cy_value, cy_bdim] = unwrapTensorAtLevel(cy, cur_level);
14065   auto [workspace_value, workspace_bdim] = unwrapTensorAtLevel(workspace, cur_level);
14066   std::optional<Tensor> grad_hy_value;
14067   std::optional<int64_t> grad_hy_bdim;
14068   if (grad_hy) {
14069       std::tie(grad_hy_value, grad_hy_bdim) = unwrapTensorAtLevel(grad_hy.value(), cur_level);
14070   }
14071   std::optional<Tensor> grad_cy_value;
14072   std::optional<int64_t> grad_cy_bdim;
14073   if (grad_cy) {
14074       std::tie(grad_cy_value, grad_cy_bdim) = unwrapTensorAtLevel(grad_cy.value(), cur_level);
14075   }
14076   auto results = batch_rule(grad_hy_value, grad_hy_bdim, grad_cy_value, grad_cy_bdim, cx_value, cx_bdim, cy_value, cy_bdim, workspace_value, workspace_bdim, has_bias);
14077   return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level));
14078 }
14079 template <typename batch_rule_t, batch_rule_t batch_rule>
14080 ::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor,at::Tensor> _thnn_fused_lstm_cell_backward_generated_plumbing(const ::std::optional<at::Tensor> & grad_hy, const ::std::optional<at::Tensor> & grad_cy, const at::Tensor & cx, const at::Tensor & cy, const at::Tensor & workspace, bool has_bias) {
14081   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
14082   auto maybe_layer = maybeCurrentDynamicLayer();
14083   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
14084   int64_t cur_level = maybe_layer->layerId();
14085   if (!isBatchedAtLevel(grad_hy, cur_level) && !isBatchedAtLevel(grad_cy, cur_level) && !isBatchedAtLevel(cx, cur_level) && !isBatchedAtLevel(cy, cur_level) && !isBatchedAtLevel(workspace, cur_level)) {
14086     return at::_ops::_thnn_fused_lstm_cell_backward::call(grad_hy, grad_cy, cx, cy, workspace, has_bias);
14087   }
14088   auto [cx_value, cx_bdim] = unwrapTensorAtLevel(cx, cur_level);
14089   auto [cy_value, cy_bdim] = unwrapTensorAtLevel(cy, cur_level);
14090   auto [workspace_value, workspace_bdim] = unwrapTensorAtLevel(workspace, cur_level);
14091   std::optional<Tensor> grad_hy_value;
14092   std::optional<int64_t> grad_hy_bdim;
14093   if (grad_hy) {
14094       std::tie(grad_hy_value, grad_hy_bdim) = unwrapTensorAtLevel(grad_hy.value(), cur_level);
14095   }
14096   std::optional<Tensor> grad_cy_value;
14097   std::optional<int64_t> grad_cy_bdim;
14098   if (grad_cy) {
14099       std::tie(grad_cy_value, grad_cy_bdim) = unwrapTensorAtLevel(grad_cy.value(), cur_level);
14100   }
14101   auto results = batch_rule(grad_hy_value, grad_hy_bdim, grad_cy_value, grad_cy_bdim, cx_value, cx_bdim, cy_value, cy_bdim, workspace_value, workspace_bdim, has_bias);
14102   return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level), makeBatched(std::get<6>(results), std::get<7>(results), cur_level), makeBatched(std::get<8>(results), std::get<9>(results), cur_level));
14103 }
14104 template <typename batch_rule_t, batch_rule_t batch_rule>
14105 ::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor,at::Tensor> _thnn_differentiable_lstm_cell_backward_generated_plumbing(const ::std::optional<at::Tensor> & grad_hy, const ::std::optional<at::Tensor> & grad_cy, const at::Tensor & input_gates, const at::Tensor & hidden_gates, const ::std::optional<at::Tensor> & input_bias, const ::std::optional<at::Tensor> & hidden_bias, const at::Tensor & cx, const at::Tensor & cy) {
14106   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
14107   auto maybe_layer = maybeCurrentDynamicLayer();
14108   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
14109   int64_t cur_level = maybe_layer->layerId();
14110   if (!isBatchedAtLevel(grad_hy, cur_level) && !isBatchedAtLevel(grad_cy, cur_level) && !isBatchedAtLevel(input_gates, cur_level) && !isBatchedAtLevel(hidden_gates, cur_level) && !isBatchedAtLevel(input_bias, cur_level) && !isBatchedAtLevel(hidden_bias, cur_level) && !isBatchedAtLevel(cx, cur_level) && !isBatchedAtLevel(cy, cur_level)) {
14111     return at::_ops::_thnn_differentiable_lstm_cell_backward::call(grad_hy, grad_cy, input_gates, hidden_gates, input_bias, hidden_bias, cx, cy);
14112   }
14113   auto [input_gates_value, input_gates_bdim] = unwrapTensorAtLevel(input_gates, cur_level);
14114   auto [hidden_gates_value, hidden_gates_bdim] = unwrapTensorAtLevel(hidden_gates, cur_level);
14115   auto [cx_value, cx_bdim] = unwrapTensorAtLevel(cx, cur_level);
14116   auto [cy_value, cy_bdim] = unwrapTensorAtLevel(cy, cur_level);
14117   std::optional<Tensor> grad_hy_value;
14118   std::optional<int64_t> grad_hy_bdim;
14119   if (grad_hy) {
14120       std::tie(grad_hy_value, grad_hy_bdim) = unwrapTensorAtLevel(grad_hy.value(), cur_level);
14121   }
14122   std::optional<Tensor> grad_cy_value;
14123   std::optional<int64_t> grad_cy_bdim;
14124   if (grad_cy) {
14125       std::tie(grad_cy_value, grad_cy_bdim) = unwrapTensorAtLevel(grad_cy.value(), cur_level);
14126   }
14127   std::optional<Tensor> input_bias_value;
14128   std::optional<int64_t> input_bias_bdim;
14129   if (input_bias) {
14130       std::tie(input_bias_value, input_bias_bdim) = unwrapTensorAtLevel(input_bias.value(), cur_level);
14131   }
14132   std::optional<Tensor> hidden_bias_value;
14133   std::optional<int64_t> hidden_bias_bdim;
14134   if (hidden_bias) {
14135       std::tie(hidden_bias_value, hidden_bias_bdim) = unwrapTensorAtLevel(hidden_bias.value(), cur_level);
14136   }
14137   auto results = batch_rule(grad_hy_value, grad_hy_bdim, grad_cy_value, grad_cy_bdim, input_gates_value, input_gates_bdim, hidden_gates_value, hidden_gates_bdim, input_bias_value, input_bias_bdim, hidden_bias_value, hidden_bias_bdim, cx_value, cx_bdim, cy_value, cy_bdim);
14138   return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level), makeBatched(std::get<6>(results), std::get<7>(results), cur_level), makeBatched(std::get<8>(results), std::get<9>(results), cur_level));
14139 }
14140 template <typename batch_rule_t, batch_rule_t batch_rule>
14141 ::std::tuple<at::Tensor,at::Tensor> _thnn_fused_gru_cell_generated_plumbing(const at::Tensor & input_gates, const at::Tensor & hidden_gates, const at::Tensor & hx, const ::std::optional<at::Tensor> & input_bias, const ::std::optional<at::Tensor> & hidden_bias) {
14142   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
14143   auto maybe_layer = maybeCurrentDynamicLayer();
14144   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
14145   int64_t cur_level = maybe_layer->layerId();
14146   if (!isBatchedAtLevel(input_gates, cur_level) && !isBatchedAtLevel(hidden_gates, cur_level) && !isBatchedAtLevel(hx, cur_level) && !isBatchedAtLevel(input_bias, cur_level) && !isBatchedAtLevel(hidden_bias, cur_level)) {
14147     return at::_ops::_thnn_fused_gru_cell::call(input_gates, hidden_gates, hx, input_bias, hidden_bias);
14148   }
14149   auto [input_gates_value, input_gates_bdim] = unwrapTensorAtLevel(input_gates, cur_level);
14150   auto [hidden_gates_value, hidden_gates_bdim] = unwrapTensorAtLevel(hidden_gates, cur_level);
14151   auto [hx_value, hx_bdim] = unwrapTensorAtLevel(hx, cur_level);
14152   std::optional<Tensor> input_bias_value;
14153   std::optional<int64_t> input_bias_bdim;
14154   if (input_bias) {
14155       std::tie(input_bias_value, input_bias_bdim) = unwrapTensorAtLevel(input_bias.value(), cur_level);
14156   }
14157   std::optional<Tensor> hidden_bias_value;
14158   std::optional<int64_t> hidden_bias_bdim;
14159   if (hidden_bias) {
14160       std::tie(hidden_bias_value, hidden_bias_bdim) = unwrapTensorAtLevel(hidden_bias.value(), cur_level);
14161   }
14162   auto results = batch_rule(input_gates_value, input_gates_bdim, hidden_gates_value, hidden_gates_bdim, hx_value, hx_bdim, input_bias_value, input_bias_bdim, hidden_bias_value, hidden_bias_bdim);
14163   return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
14164 }
14165 template <typename batch_rule_t, batch_rule_t batch_rule>
14166 ::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor,at::Tensor> _thnn_fused_gru_cell_backward_generated_plumbing(const at::Tensor & grad_hy, const at::Tensor & workspace, bool has_bias) {
14167   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
14168   auto maybe_layer = maybeCurrentDynamicLayer();
14169   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
14170   int64_t cur_level = maybe_layer->layerId();
14171   if (!isBatchedAtLevel(grad_hy, cur_level) && !isBatchedAtLevel(workspace, cur_level)) {
14172     return at::_ops::_thnn_fused_gru_cell_backward::call(grad_hy, workspace, has_bias);
14173   }
14174   auto [grad_hy_value, grad_hy_bdim] = unwrapTensorAtLevel(grad_hy, cur_level);
14175   auto [workspace_value, workspace_bdim] = unwrapTensorAtLevel(workspace, cur_level);
14176   auto results = batch_rule(grad_hy_value, grad_hy_bdim, workspace_value, workspace_bdim, has_bias);
14177   return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level), makeBatched(std::get<6>(results), std::get<7>(results), cur_level), makeBatched(std::get<8>(results), std::get<9>(results), cur_level));
14178 }
14179 template <typename batch_rule_t, batch_rule_t batch_rule>
14180 ::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor,at::Tensor> _thnn_differentiable_gru_cell_backward_generated_plumbing(const at::Tensor & grad_hy, const at::Tensor & input_gates, const at::Tensor & hidden_gates, const at::Tensor & hx, const ::std::optional<at::Tensor> & input_bias, const ::std::optional<at::Tensor> & hidden_bias) {
14181   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
14182   auto maybe_layer = maybeCurrentDynamicLayer();
14183   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
14184   int64_t cur_level = maybe_layer->layerId();
14185   if (!isBatchedAtLevel(grad_hy, cur_level) && !isBatchedAtLevel(input_gates, cur_level) && !isBatchedAtLevel(hidden_gates, cur_level) && !isBatchedAtLevel(hx, cur_level) && !isBatchedAtLevel(input_bias, cur_level) && !isBatchedAtLevel(hidden_bias, cur_level)) {
14186     return at::_ops::_thnn_differentiable_gru_cell_backward::call(grad_hy, input_gates, hidden_gates, hx, input_bias, hidden_bias);
14187   }
14188   auto [grad_hy_value, grad_hy_bdim] = unwrapTensorAtLevel(grad_hy, cur_level);
14189   auto [input_gates_value, input_gates_bdim] = unwrapTensorAtLevel(input_gates, cur_level);
14190   auto [hidden_gates_value, hidden_gates_bdim] = unwrapTensorAtLevel(hidden_gates, cur_level);
14191   auto [hx_value, hx_bdim] = unwrapTensorAtLevel(hx, cur_level);
14192   std::optional<Tensor> input_bias_value;
14193   std::optional<int64_t> input_bias_bdim;
14194   if (input_bias) {
14195       std::tie(input_bias_value, input_bias_bdim) = unwrapTensorAtLevel(input_bias.value(), cur_level);
14196   }
14197   std::optional<Tensor> hidden_bias_value;
14198   std::optional<int64_t> hidden_bias_bdim;
14199   if (hidden_bias) {
14200       std::tie(hidden_bias_value, hidden_bias_bdim) = unwrapTensorAtLevel(hidden_bias.value(), cur_level);
14201   }
14202   auto results = batch_rule(grad_hy_value, grad_hy_bdim, input_gates_value, input_gates_bdim, hidden_gates_value, hidden_gates_bdim, hx_value, hx_bdim, input_bias_value, input_bias_bdim, hidden_bias_value, hidden_bias_bdim);
14203   return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level), makeBatched(std::get<6>(results), std::get<7>(results), cur_level), makeBatched(std::get<8>(results), std::get<9>(results), cur_level));
14204 }
14205 template <typename batch_rule_t, batch_rule_t batch_rule>
14206 ::std::tuple<at::Tensor,at::Tensor,at::Tensor> lstm_input_generated_plumbing(const at::Tensor & input, at::TensorList hx, at::TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional, bool batch_first) {
14207   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
14208   auto maybe_layer = maybeCurrentDynamicLayer();
14209   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
14210   int64_t cur_level = maybe_layer->layerId();
14211   if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(hx, cur_level) && !isBatchedAtLevel(params, cur_level)) {
14212     return at::_ops::lstm_input::call(input, hx, params, has_biases, num_layers, dropout, train, bidirectional, batch_first);
14213   }
14214   auto [input_value, input_bdim] = unwrapTensorAtLevel(input, cur_level);
14215   auto results = batch_rule(input_value, input_bdim, hx, params, has_biases, num_layers, dropout, train, bidirectional, batch_first);
14216   return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level));
14217 }
14218 template <typename batch_rule_t, batch_rule_t batch_rule>
14219 ::std::tuple<at::Tensor,at::Tensor,at::Tensor> lstm_data_generated_plumbing(const at::Tensor & data, const at::Tensor & batch_sizes, at::TensorList hx, at::TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional) {
14220   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
14221   auto maybe_layer = maybeCurrentDynamicLayer();
14222   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
14223   int64_t cur_level = maybe_layer->layerId();
14224   if (!isBatchedAtLevel(data, cur_level) && !isBatchedAtLevel(batch_sizes, cur_level) && !isBatchedAtLevel(hx, cur_level) && !isBatchedAtLevel(params, cur_level)) {
14225     return at::_ops::lstm_data::call(data, batch_sizes, hx, params, has_biases, num_layers, dropout, train, bidirectional);
14226   }
14227   auto [data_value, data_bdim] = unwrapTensorAtLevel(data, cur_level);
14228   auto [batch_sizes_value, batch_sizes_bdim] = unwrapTensorAtLevel(batch_sizes, cur_level);
14229   auto results = batch_rule(data_value, data_bdim, batch_sizes_value, batch_sizes_bdim, hx, params, has_biases, num_layers, dropout, train, bidirectional);
14230   return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level));
14231 }
14232 template <typename batch_rule_t, batch_rule_t batch_rule>
14233 ::std::tuple<at::Tensor,at::Tensor> gru_input_generated_plumbing(const at::Tensor & input, const at::Tensor & hx, at::TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional, bool batch_first) {
14234   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
14235   auto maybe_layer = maybeCurrentDynamicLayer();
14236   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
14237   int64_t cur_level = maybe_layer->layerId();
14238   if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(hx, cur_level) && !isBatchedAtLevel(params, cur_level)) {
14239     return at::_ops::gru_input::call(input, hx, params, has_biases, num_layers, dropout, train, bidirectional, batch_first);
14240   }
14241   auto [input_value, input_bdim] = unwrapTensorAtLevel(input, cur_level);
14242   auto [hx_value, hx_bdim] = unwrapTensorAtLevel(hx, cur_level);
14243   auto results = batch_rule(input_value, input_bdim, hx_value, hx_bdim, params, has_biases, num_layers, dropout, train, bidirectional, batch_first);
14244   return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
14245 }
14246 template <typename batch_rule_t, batch_rule_t batch_rule>
14247 ::std::tuple<at::Tensor,at::Tensor> gru_data_generated_plumbing(const at::Tensor & data, const at::Tensor & batch_sizes, const at::Tensor & hx, at::TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional) {
14248   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
14249   auto maybe_layer = maybeCurrentDynamicLayer();
14250   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
14251   int64_t cur_level = maybe_layer->layerId();
14252   if (!isBatchedAtLevel(data, cur_level) && !isBatchedAtLevel(batch_sizes, cur_level) && !isBatchedAtLevel(hx, cur_level) && !isBatchedAtLevel(params, cur_level)) {
14253     return at::_ops::gru_data::call(data, batch_sizes, hx, params, has_biases, num_layers, dropout, train, bidirectional);
14254   }
14255   auto [data_value, data_bdim] = unwrapTensorAtLevel(data, cur_level);
14256   auto [batch_sizes_value, batch_sizes_bdim] = unwrapTensorAtLevel(batch_sizes, cur_level);
14257   auto [hx_value, hx_bdim] = unwrapTensorAtLevel(hx, cur_level);
14258   auto results = batch_rule(data_value, data_bdim, batch_sizes_value, batch_sizes_bdim, hx_value, hx_bdim, params, has_biases, num_layers, dropout, train, bidirectional);
14259   return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
14260 }
14261 template <typename batch_rule_t, batch_rule_t batch_rule>
14262 ::std::tuple<at::Tensor,at::Tensor> rnn_tanh_input_generated_plumbing(const at::Tensor & input, const at::Tensor & hx, at::TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional, bool batch_first) {
14263   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
14264   auto maybe_layer = maybeCurrentDynamicLayer();
14265   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
14266   int64_t cur_level = maybe_layer->layerId();
14267   if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(hx, cur_level) && !isBatchedAtLevel(params, cur_level)) {
14268     return at::_ops::rnn_tanh_input::call(input, hx, params, has_biases, num_layers, dropout, train, bidirectional, batch_first);
14269   }
14270   auto [input_value, input_bdim] = unwrapTensorAtLevel(input, cur_level);
14271   auto [hx_value, hx_bdim] = unwrapTensorAtLevel(hx, cur_level);
14272   auto results = batch_rule(input_value, input_bdim, hx_value, hx_bdim, params, has_biases, num_layers, dropout, train, bidirectional, batch_first);
14273   return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
14274 }
14275 template <typename batch_rule_t, batch_rule_t batch_rule>
14276 ::std::tuple<at::Tensor,at::Tensor> rnn_tanh_data_generated_plumbing(const at::Tensor & data, const at::Tensor & batch_sizes, const at::Tensor & hx, at::TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional) {
14277   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
14278   auto maybe_layer = maybeCurrentDynamicLayer();
14279   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
14280   int64_t cur_level = maybe_layer->layerId();
14281   if (!isBatchedAtLevel(data, cur_level) && !isBatchedAtLevel(batch_sizes, cur_level) && !isBatchedAtLevel(hx, cur_level) && !isBatchedAtLevel(params, cur_level)) {
14282     return at::_ops::rnn_tanh_data::call(data, batch_sizes, hx, params, has_biases, num_layers, dropout, train, bidirectional);
14283   }
14284   auto [data_value, data_bdim] = unwrapTensorAtLevel(data, cur_level);
14285   auto [batch_sizes_value, batch_sizes_bdim] = unwrapTensorAtLevel(batch_sizes, cur_level);
14286   auto [hx_value, hx_bdim] = unwrapTensorAtLevel(hx, cur_level);
14287   auto results = batch_rule(data_value, data_bdim, batch_sizes_value, batch_sizes_bdim, hx_value, hx_bdim, params, has_biases, num_layers, dropout, train, bidirectional);
14288   return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
14289 }
14290 template <typename batch_rule_t, batch_rule_t batch_rule>
14291 ::std::tuple<at::Tensor,at::Tensor> rnn_relu_input_generated_plumbing(const at::Tensor & input, const at::Tensor & hx, at::TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional, bool batch_first) {
14292   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
14293   auto maybe_layer = maybeCurrentDynamicLayer();
14294   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
14295   int64_t cur_level = maybe_layer->layerId();
14296   if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(hx, cur_level) && !isBatchedAtLevel(params, cur_level)) {
14297     return at::_ops::rnn_relu_input::call(input, hx, params, has_biases, num_layers, dropout, train, bidirectional, batch_first);
14298   }
14299   auto [input_value, input_bdim] = unwrapTensorAtLevel(input, cur_level);
14300   auto [hx_value, hx_bdim] = unwrapTensorAtLevel(hx, cur_level);
14301   auto results = batch_rule(input_value, input_bdim, hx_value, hx_bdim, params, has_biases, num_layers, dropout, train, bidirectional, batch_first);
14302   return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
14303 }
14304 template <typename batch_rule_t, batch_rule_t batch_rule>
14305 ::std::tuple<at::Tensor,at::Tensor> rnn_relu_data_generated_plumbing(const at::Tensor & data, const at::Tensor & batch_sizes, const at::Tensor & hx, at::TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional) {
14306   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
14307   auto maybe_layer = maybeCurrentDynamicLayer();
14308   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
14309   int64_t cur_level = maybe_layer->layerId();
14310   if (!isBatchedAtLevel(data, cur_level) && !isBatchedAtLevel(batch_sizes, cur_level) && !isBatchedAtLevel(hx, cur_level) && !isBatchedAtLevel(params, cur_level)) {
14311     return at::_ops::rnn_relu_data::call(data, batch_sizes, hx, params, has_biases, num_layers, dropout, train, bidirectional);
14312   }
14313   auto [data_value, data_bdim] = unwrapTensorAtLevel(data, cur_level);
14314   auto [batch_sizes_value, batch_sizes_bdim] = unwrapTensorAtLevel(batch_sizes, cur_level);
14315   auto [hx_value, hx_bdim] = unwrapTensorAtLevel(hx, cur_level);
14316   auto results = batch_rule(data_value, data_bdim, batch_sizes_value, batch_sizes_bdim, hx_value, hx_bdim, params, has_biases, num_layers, dropout, train, bidirectional);
14317   return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
14318 }
14319 template <typename batch_rule_t, batch_rule_t batch_rule>
14320 ::std::tuple<at::Tensor,at::Tensor> lstm_cell_generated_plumbing(const at::Tensor & input, at::TensorList hx, const at::Tensor & w_ih, const at::Tensor & w_hh, const ::std::optional<at::Tensor> & b_ih, const ::std::optional<at::Tensor> & b_hh) {
14321   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
14322   auto maybe_layer = maybeCurrentDynamicLayer();
14323   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
14324   int64_t cur_level = maybe_layer->layerId();
14325   if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(hx, cur_level) && !isBatchedAtLevel(w_ih, cur_level) && !isBatchedAtLevel(w_hh, cur_level) && !isBatchedAtLevel(b_ih, cur_level) && !isBatchedAtLevel(b_hh, cur_level)) {
14326     return at::_ops::lstm_cell::call(input, hx, w_ih, w_hh, b_ih, b_hh);
14327   }
14328   auto [input_value, input_bdim] = unwrapTensorAtLevel(input, cur_level);
14329   auto [w_ih_value, w_ih_bdim] = unwrapTensorAtLevel(w_ih, cur_level);
14330   auto [w_hh_value, w_hh_bdim] = unwrapTensorAtLevel(w_hh, cur_level);
14331   std::optional<Tensor> b_ih_value;
14332   std::optional<int64_t> b_ih_bdim;
14333   if (b_ih) {
14334       std::tie(b_ih_value, b_ih_bdim) = unwrapTensorAtLevel(b_ih.value(), cur_level);
14335   }
14336   std::optional<Tensor> b_hh_value;
14337   std::optional<int64_t> b_hh_bdim;
14338   if (b_hh) {
14339       std::tie(b_hh_value, b_hh_bdim) = unwrapTensorAtLevel(b_hh.value(), cur_level);
14340   }
14341   auto results = batch_rule(input_value, input_bdim, hx, w_ih_value, w_ih_bdim, w_hh_value, w_hh_bdim, b_ih_value, b_ih_bdim, b_hh_value, b_hh_bdim);
14342   return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
14343 }
14344 template <typename batch_rule_t, batch_rule_t batch_rule>
14345 at::Tensor gru_cell_generated_plumbing(const at::Tensor & input, const at::Tensor & hx, const at::Tensor & w_ih, const at::Tensor & w_hh, const ::std::optional<at::Tensor> & b_ih, const ::std::optional<at::Tensor> & b_hh) {
14346   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
14347   auto maybe_layer = maybeCurrentDynamicLayer();
14348   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
14349   int64_t cur_level = maybe_layer->layerId();
14350   if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(hx, cur_level) && !isBatchedAtLevel(w_ih, cur_level) && !isBatchedAtLevel(w_hh, cur_level) && !isBatchedAtLevel(b_ih, cur_level) && !isBatchedAtLevel(b_hh, cur_level)) {
14351     return at::_ops::gru_cell::call(input, hx, w_ih, w_hh, b_ih, b_hh);
14352   }
14353   auto [input_value, input_bdim] = unwrapTensorAtLevel(input, cur_level);
14354   auto [hx_value, hx_bdim] = unwrapTensorAtLevel(hx, cur_level);
14355   auto [w_ih_value, w_ih_bdim] = unwrapTensorAtLevel(w_ih, cur_level);
14356   auto [w_hh_value, w_hh_bdim] = unwrapTensorAtLevel(w_hh, cur_level);
14357   std::optional<Tensor> b_ih_value;
14358   std::optional<int64_t> b_ih_bdim;
14359   if (b_ih) {
14360       std::tie(b_ih_value, b_ih_bdim) = unwrapTensorAtLevel(b_ih.value(), cur_level);
14361   }
14362   std::optional<Tensor> b_hh_value;
14363   std::optional<int64_t> b_hh_bdim;
14364   if (b_hh) {
14365       std::tie(b_hh_value, b_hh_bdim) = unwrapTensorAtLevel(b_hh.value(), cur_level);
14366   }
14367   auto results = batch_rule(input_value, input_bdim, hx_value, hx_bdim, w_ih_value, w_ih_bdim, w_hh_value, w_hh_bdim, b_ih_value, b_ih_bdim, b_hh_value, b_hh_bdim);
14368   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
14369 }
14370 template <typename batch_rule_t, batch_rule_t batch_rule>
14371 at::Tensor rnn_tanh_cell_generated_plumbing(const at::Tensor & input, const at::Tensor & hx, const at::Tensor & w_ih, const at::Tensor & w_hh, const ::std::optional<at::Tensor> & b_ih, const ::std::optional<at::Tensor> & b_hh) {
14372   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
14373   auto maybe_layer = maybeCurrentDynamicLayer();
14374   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
14375   int64_t cur_level = maybe_layer->layerId();
14376   if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(hx, cur_level) && !isBatchedAtLevel(w_ih, cur_level) && !isBatchedAtLevel(w_hh, cur_level) && !isBatchedAtLevel(b_ih, cur_level) && !isBatchedAtLevel(b_hh, cur_level)) {
14377     return at::_ops::rnn_tanh_cell::call(input, hx, w_ih, w_hh, b_ih, b_hh);
14378   }
14379   auto [input_value, input_bdim] = unwrapTensorAtLevel(input, cur_level);
14380   auto [hx_value, hx_bdim] = unwrapTensorAtLevel(hx, cur_level);
14381   auto [w_ih_value, w_ih_bdim] = unwrapTensorAtLevel(w_ih, cur_level);
14382   auto [w_hh_value, w_hh_bdim] = unwrapTensorAtLevel(w_hh, cur_level);
14383   std::optional<Tensor> b_ih_value;
14384   std::optional<int64_t> b_ih_bdim;
14385   if (b_ih) {
14386       std::tie(b_ih_value, b_ih_bdim) = unwrapTensorAtLevel(b_ih.value(), cur_level);
14387   }
14388   std::optional<Tensor> b_hh_value;
14389   std::optional<int64_t> b_hh_bdim;
14390   if (b_hh) {
14391       std::tie(b_hh_value, b_hh_bdim) = unwrapTensorAtLevel(b_hh.value(), cur_level);
14392   }
14393   auto results = batch_rule(input_value, input_bdim, hx_value, hx_bdim, w_ih_value, w_ih_bdim, w_hh_value, w_hh_bdim, b_ih_value, b_ih_bdim, b_hh_value, b_hh_bdim);
14394   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
14395 }
14396 template <typename batch_rule_t, batch_rule_t batch_rule>
14397 at::Tensor rnn_relu_cell_generated_plumbing(const at::Tensor & input, const at::Tensor & hx, const at::Tensor & w_ih, const at::Tensor & w_hh, const ::std::optional<at::Tensor> & b_ih, const ::std::optional<at::Tensor> & b_hh) {
14398   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
14399   auto maybe_layer = maybeCurrentDynamicLayer();
14400   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
14401   int64_t cur_level = maybe_layer->layerId();
14402   if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(hx, cur_level) && !isBatchedAtLevel(w_ih, cur_level) && !isBatchedAtLevel(w_hh, cur_level) && !isBatchedAtLevel(b_ih, cur_level) && !isBatchedAtLevel(b_hh, cur_level)) {
14403     return at::_ops::rnn_relu_cell::call(input, hx, w_ih, w_hh, b_ih, b_hh);
14404   }
14405   auto [input_value, input_bdim] = unwrapTensorAtLevel(input, cur_level);
14406   auto [hx_value, hx_bdim] = unwrapTensorAtLevel(hx, cur_level);
14407   auto [w_ih_value, w_ih_bdim] = unwrapTensorAtLevel(w_ih, cur_level);
14408   auto [w_hh_value, w_hh_bdim] = unwrapTensorAtLevel(w_hh, cur_level);
14409   std::optional<Tensor> b_ih_value;
14410   std::optional<int64_t> b_ih_bdim;
14411   if (b_ih) {
14412       std::tie(b_ih_value, b_ih_bdim) = unwrapTensorAtLevel(b_ih.value(), cur_level);
14413   }
14414   std::optional<Tensor> b_hh_value;
14415   std::optional<int64_t> b_hh_bdim;
14416   if (b_hh) {
14417       std::tie(b_hh_value, b_hh_bdim) = unwrapTensorAtLevel(b_hh.value(), cur_level);
14418   }
14419   auto results = batch_rule(input_value, input_bdim, hx_value, hx_bdim, w_ih_value, w_ih_bdim, w_hh_value, w_hh_bdim, b_ih_value, b_ih_bdim, b_hh_value, b_hh_bdim);
14420   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
14421 }
14422 template <typename batch_rule_t, batch_rule_t batch_rule>
14423 ::std::tuple<at::Tensor,at::Tensor> quantized_lstm_cell_generated_plumbing(const at::Tensor & input, at::TensorList hx, const at::Tensor & w_ih, const at::Tensor & w_hh, const at::Tensor & b_ih, const at::Tensor & b_hh, const at::Tensor & packed_ih, const at::Tensor & packed_hh, const at::Tensor & col_offsets_ih, const at::Tensor & col_offsets_hh, const at::Scalar & scale_ih, const at::Scalar & scale_hh, const at::Scalar & zero_point_ih, const at::Scalar & zero_point_hh) {
14424   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
14425   auto maybe_layer = maybeCurrentDynamicLayer();
14426   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
14427   int64_t cur_level = maybe_layer->layerId();
14428   if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(hx, cur_level) && !isBatchedAtLevel(w_ih, cur_level) && !isBatchedAtLevel(w_hh, cur_level) && !isBatchedAtLevel(b_ih, cur_level) && !isBatchedAtLevel(b_hh, cur_level) && !isBatchedAtLevel(packed_ih, cur_level) && !isBatchedAtLevel(packed_hh, cur_level) && !isBatchedAtLevel(col_offsets_ih, cur_level) && !isBatchedAtLevel(col_offsets_hh, cur_level)) {
14429     return at::_ops::quantized_lstm_cell::call(input, hx, w_ih, w_hh, b_ih, b_hh, packed_ih, packed_hh, col_offsets_ih, col_offsets_hh, scale_ih, scale_hh, zero_point_ih, zero_point_hh);
14430   }
14431   auto [input_value, input_bdim] = unwrapTensorAtLevel(input, cur_level);
14432   auto [w_ih_value, w_ih_bdim] = unwrapTensorAtLevel(w_ih, cur_level);
14433   auto [w_hh_value, w_hh_bdim] = unwrapTensorAtLevel(w_hh, cur_level);
14434   auto [b_ih_value, b_ih_bdim] = unwrapTensorAtLevel(b_ih, cur_level);
14435   auto [b_hh_value, b_hh_bdim] = unwrapTensorAtLevel(b_hh, cur_level);
14436   auto [packed_ih_value, packed_ih_bdim] = unwrapTensorAtLevel(packed_ih, cur_level);
14437   auto [packed_hh_value, packed_hh_bdim] = unwrapTensorAtLevel(packed_hh, cur_level);
14438   auto [col_offsets_ih_value, col_offsets_ih_bdim] = unwrapTensorAtLevel(col_offsets_ih, cur_level);
14439   auto [col_offsets_hh_value, col_offsets_hh_bdim] = unwrapTensorAtLevel(col_offsets_hh, cur_level);
14440   auto results = batch_rule(input_value, input_bdim, hx, w_ih_value, w_ih_bdim, w_hh_value, w_hh_bdim, b_ih_value, b_ih_bdim, b_hh_value, b_hh_bdim, packed_ih_value, packed_ih_bdim, packed_hh_value, packed_hh_bdim, col_offsets_ih_value, col_offsets_ih_bdim, col_offsets_hh_value, col_offsets_hh_bdim, scale_ih, scale_hh, zero_point_ih, zero_point_hh);
14441   return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
14442 }
14443 template <typename batch_rule_t, batch_rule_t batch_rule>
14444 at::Tensor quantized_gru_cell_generated_plumbing(const at::Tensor & input, const at::Tensor & hx, const at::Tensor & w_ih, const at::Tensor & w_hh, const at::Tensor & b_ih, const at::Tensor & b_hh, const at::Tensor & packed_ih, const at::Tensor & packed_hh, const at::Tensor & col_offsets_ih, const at::Tensor & col_offsets_hh, const at::Scalar & scale_ih, const at::Scalar & scale_hh, const at::Scalar & zero_point_ih, const at::Scalar & zero_point_hh) {
14445   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
14446   auto maybe_layer = maybeCurrentDynamicLayer();
14447   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
14448   int64_t cur_level = maybe_layer->layerId();
14449   if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(hx, cur_level) && !isBatchedAtLevel(w_ih, cur_level) && !isBatchedAtLevel(w_hh, cur_level) && !isBatchedAtLevel(b_ih, cur_level) && !isBatchedAtLevel(b_hh, cur_level) && !isBatchedAtLevel(packed_ih, cur_level) && !isBatchedAtLevel(packed_hh, cur_level) && !isBatchedAtLevel(col_offsets_ih, cur_level) && !isBatchedAtLevel(col_offsets_hh, cur_level)) {
14450     return at::_ops::quantized_gru_cell::call(input, hx, w_ih, w_hh, b_ih, b_hh, packed_ih, packed_hh, col_offsets_ih, col_offsets_hh, scale_ih, scale_hh, zero_point_ih, zero_point_hh);
14451   }
14452   auto [input_value, input_bdim] = unwrapTensorAtLevel(input, cur_level);
14453   auto [hx_value, hx_bdim] = unwrapTensorAtLevel(hx, cur_level);
14454   auto [w_ih_value, w_ih_bdim] = unwrapTensorAtLevel(w_ih, cur_level);
14455   auto [w_hh_value, w_hh_bdim] = unwrapTensorAtLevel(w_hh, cur_level);
14456   auto [b_ih_value, b_ih_bdim] = unwrapTensorAtLevel(b_ih, cur_level);
14457   auto [b_hh_value, b_hh_bdim] = unwrapTensorAtLevel(b_hh, cur_level);
14458   auto [packed_ih_value, packed_ih_bdim] = unwrapTensorAtLevel(packed_ih, cur_level);
14459   auto [packed_hh_value, packed_hh_bdim] = unwrapTensorAtLevel(packed_hh, cur_level);
14460   auto [col_offsets_ih_value, col_offsets_ih_bdim] = unwrapTensorAtLevel(col_offsets_ih, cur_level);
14461   auto [col_offsets_hh_value, col_offsets_hh_bdim] = unwrapTensorAtLevel(col_offsets_hh, cur_level);
14462   auto results = batch_rule(input_value, input_bdim, hx_value, hx_bdim, w_ih_value, w_ih_bdim, w_hh_value, w_hh_bdim, b_ih_value, b_ih_bdim, b_hh_value, b_hh_bdim, packed_ih_value, packed_ih_bdim, packed_hh_value, packed_hh_bdim, col_offsets_ih_value, col_offsets_ih_bdim, col_offsets_hh_value, col_offsets_hh_bdim, scale_ih, scale_hh, zero_point_ih, zero_point_hh);
14463   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
14464 }
14465 template <typename batch_rule_t, batch_rule_t batch_rule>
14466 at::Tensor quantized_rnn_relu_cell_generated_plumbing(const at::Tensor & input, const at::Tensor & hx, const at::Tensor & w_ih, const at::Tensor & w_hh, const at::Tensor & b_ih, const at::Tensor & b_hh, const at::Tensor & packed_ih, const at::Tensor & packed_hh, const at::Tensor & col_offsets_ih, const at::Tensor & col_offsets_hh, const at::Scalar & scale_ih, const at::Scalar & scale_hh, const at::Scalar & zero_point_ih, const at::Scalar & zero_point_hh) {
14467   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
14468   auto maybe_layer = maybeCurrentDynamicLayer();
14469   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
14470   int64_t cur_level = maybe_layer->layerId();
14471   if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(hx, cur_level) && !isBatchedAtLevel(w_ih, cur_level) && !isBatchedAtLevel(w_hh, cur_level) && !isBatchedAtLevel(b_ih, cur_level) && !isBatchedAtLevel(b_hh, cur_level) && !isBatchedAtLevel(packed_ih, cur_level) && !isBatchedAtLevel(packed_hh, cur_level) && !isBatchedAtLevel(col_offsets_ih, cur_level) && !isBatchedAtLevel(col_offsets_hh, cur_level)) {
14472     return at::_ops::quantized_rnn_relu_cell::call(input, hx, w_ih, w_hh, b_ih, b_hh, packed_ih, packed_hh, col_offsets_ih, col_offsets_hh, scale_ih, scale_hh, zero_point_ih, zero_point_hh);
14473   }
14474   auto [input_value, input_bdim] = unwrapTensorAtLevel(input, cur_level);
14475   auto [hx_value, hx_bdim] = unwrapTensorAtLevel(hx, cur_level);
14476   auto [w_ih_value, w_ih_bdim] = unwrapTensorAtLevel(w_ih, cur_level);
14477   auto [w_hh_value, w_hh_bdim] = unwrapTensorAtLevel(w_hh, cur_level);
14478   auto [b_ih_value, b_ih_bdim] = unwrapTensorAtLevel(b_ih, cur_level);
14479   auto [b_hh_value, b_hh_bdim] = unwrapTensorAtLevel(b_hh, cur_level);
14480   auto [packed_ih_value, packed_ih_bdim] = unwrapTensorAtLevel(packed_ih, cur_level);
14481   auto [packed_hh_value, packed_hh_bdim] = unwrapTensorAtLevel(packed_hh, cur_level);
14482   auto [col_offsets_ih_value, col_offsets_ih_bdim] = unwrapTensorAtLevel(col_offsets_ih, cur_level);
14483   auto [col_offsets_hh_value, col_offsets_hh_bdim] = unwrapTensorAtLevel(col_offsets_hh, cur_level);
14484   auto results = batch_rule(input_value, input_bdim, hx_value, hx_bdim, w_ih_value, w_ih_bdim, w_hh_value, w_hh_bdim, b_ih_value, b_ih_bdim, b_hh_value, b_hh_bdim, packed_ih_value, packed_ih_bdim, packed_hh_value, packed_hh_bdim, col_offsets_ih_value, col_offsets_ih_bdim, col_offsets_hh_value, col_offsets_hh_bdim, scale_ih, scale_hh, zero_point_ih, zero_point_hh);
14485   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
14486 }
14487 template <typename batch_rule_t, batch_rule_t batch_rule>
14488 at::Tensor quantized_rnn_tanh_cell_generated_plumbing(const at::Tensor & input, const at::Tensor & hx, const at::Tensor & w_ih, const at::Tensor & w_hh, const at::Tensor & b_ih, const at::Tensor & b_hh, const at::Tensor & packed_ih, const at::Tensor & packed_hh, const at::Tensor & col_offsets_ih, const at::Tensor & col_offsets_hh, const at::Scalar & scale_ih, const at::Scalar & scale_hh, const at::Scalar & zero_point_ih, const at::Scalar & zero_point_hh) {
14489   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
14490   auto maybe_layer = maybeCurrentDynamicLayer();
14491   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
14492   int64_t cur_level = maybe_layer->layerId();
14493   if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(hx, cur_level) && !isBatchedAtLevel(w_ih, cur_level) && !isBatchedAtLevel(w_hh, cur_level) && !isBatchedAtLevel(b_ih, cur_level) && !isBatchedAtLevel(b_hh, cur_level) && !isBatchedAtLevel(packed_ih, cur_level) && !isBatchedAtLevel(packed_hh, cur_level) && !isBatchedAtLevel(col_offsets_ih, cur_level) && !isBatchedAtLevel(col_offsets_hh, cur_level)) {
14494     return at::_ops::quantized_rnn_tanh_cell::call(input, hx, w_ih, w_hh, b_ih, b_hh, packed_ih, packed_hh, col_offsets_ih, col_offsets_hh, scale_ih, scale_hh, zero_point_ih, zero_point_hh);
14495   }
14496   auto [input_value, input_bdim] = unwrapTensorAtLevel(input, cur_level);
14497   auto [hx_value, hx_bdim] = unwrapTensorAtLevel(hx, cur_level);
14498   auto [w_ih_value, w_ih_bdim] = unwrapTensorAtLevel(w_ih, cur_level);
14499   auto [w_hh_value, w_hh_bdim] = unwrapTensorAtLevel(w_hh, cur_level);
14500   auto [b_ih_value, b_ih_bdim] = unwrapTensorAtLevel(b_ih, cur_level);
14501   auto [b_hh_value, b_hh_bdim] = unwrapTensorAtLevel(b_hh, cur_level);
14502   auto [packed_ih_value, packed_ih_bdim] = unwrapTensorAtLevel(packed_ih, cur_level);
14503   auto [packed_hh_value, packed_hh_bdim] = unwrapTensorAtLevel(packed_hh, cur_level);
14504   auto [col_offsets_ih_value, col_offsets_ih_bdim] = unwrapTensorAtLevel(col_offsets_ih, cur_level);
14505   auto [col_offsets_hh_value, col_offsets_hh_bdim] = unwrapTensorAtLevel(col_offsets_hh, cur_level);
14506   auto results = batch_rule(input_value, input_bdim, hx_value, hx_bdim, w_ih_value, w_ih_bdim, w_hh_value, w_hh_bdim, b_ih_value, b_ih_bdim, b_hh_value, b_hh_bdim, packed_ih_value, packed_ih_bdim, packed_hh_value, packed_hh_bdim, col_offsets_ih_value, col_offsets_ih_bdim, col_offsets_hh_value, col_offsets_hh_bdim, scale_ih, scale_hh, zero_point_ih, zero_point_hh);
14507   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
14508 }
14509 template <typename batch_rule_t, batch_rule_t batch_rule>
14510 ::std::tuple<at::Tensor,at::Tensor> _pack_padded_sequence_generated_plumbing(const at::Tensor & input, const at::Tensor & lengths, bool batch_first) {
14511   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
14512   auto maybe_layer = maybeCurrentDynamicLayer();
14513   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
14514   int64_t cur_level = maybe_layer->layerId();
14515   if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(lengths, cur_level)) {
14516     return at::_ops::_pack_padded_sequence::call(input, lengths, batch_first);
14517   }
14518   auto [input_value, input_bdim] = unwrapTensorAtLevel(input, cur_level);
14519   auto [lengths_value, lengths_bdim] = unwrapTensorAtLevel(lengths, cur_level);
14520   auto results = batch_rule(input_value, input_bdim, lengths_value, lengths_bdim, batch_first);
14521   return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
14522 }
14523 template <typename batch_rule_t, batch_rule_t batch_rule>
14524 at::Tensor _pack_padded_sequence_backward_generated_plumbing(const at::Tensor & grad, c10::SymIntArrayRef input_size, const at::Tensor & batch_sizes, bool batch_first) {
14525   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
14526   auto maybe_layer = maybeCurrentDynamicLayer();
14527   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
14528   int64_t cur_level = maybe_layer->layerId();
14529   if (!isBatchedAtLevel(grad, cur_level) && !isBatchedAtLevel(batch_sizes, cur_level)) {
14530     return at::_ops::_pack_padded_sequence_backward::call(grad, input_size, batch_sizes, batch_first);
14531   }
14532   auto [grad_value, grad_bdim] = unwrapTensorAtLevel(grad, cur_level);
14533   auto [batch_sizes_value, batch_sizes_bdim] = unwrapTensorAtLevel(batch_sizes, cur_level);
14534   auto results = batch_rule(grad_value, grad_bdim, input_size, batch_sizes_value, batch_sizes_bdim, batch_first);
14535   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
14536 }
14537 template <typename batch_rule_t, batch_rule_t batch_rule>
14538 ::std::tuple<at::Tensor,at::Tensor> _pad_packed_sequence_generated_plumbing(const at::Tensor & data, const at::Tensor & batch_sizes, bool batch_first, const at::Scalar & padding_value, int64_t total_length) {
14539   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
14540   auto maybe_layer = maybeCurrentDynamicLayer();
14541   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
14542   int64_t cur_level = maybe_layer->layerId();
14543   if (!isBatchedAtLevel(data, cur_level) && !isBatchedAtLevel(batch_sizes, cur_level)) {
14544     return at::_ops::_pad_packed_sequence::call(data, batch_sizes, batch_first, padding_value, total_length);
14545   }
14546   auto [data_value, data_bdim] = unwrapTensorAtLevel(data, cur_level);
14547   auto [batch_sizes_value, batch_sizes_bdim] = unwrapTensorAtLevel(batch_sizes, cur_level);
14548   auto results = batch_rule(data_value, data_bdim, batch_sizes_value, batch_sizes_bdim, batch_first, padding_value, total_length);
14549   return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
14550 }
14551 template <typename batch_rule_t, batch_rule_t batch_rule>
14552 at::Tensor lift_generated_plumbing(const at::Tensor & self) {
14553   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
14554   auto maybe_layer = maybeCurrentDynamicLayer();
14555   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
14556   int64_t cur_level = maybe_layer->layerId();
14557   if (!isBatchedAtLevel(self, cur_level)) {
14558     return at::_ops::lift::call(self);
14559   }
14560   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
14561   auto results = batch_rule(self_value, self_bdim);
14562   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
14563 }
14564 template <typename batch_rule_t, batch_rule_t batch_rule>
14565 at::Tensor lift_fresh_generated_plumbing(const at::Tensor & self) {
14566   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
14567   auto maybe_layer = maybeCurrentDynamicLayer();
14568   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
14569   int64_t cur_level = maybe_layer->layerId();
14570   if (!isBatchedAtLevel(self, cur_level)) {
14571     return at::_ops::lift_fresh::call(self);
14572   }
14573   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
14574   auto results = batch_rule(self_value, self_bdim);
14575   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
14576 }
14577 template <typename batch_rule_t, batch_rule_t batch_rule>
14578 at::Tensor lift_fresh_copy_generated_plumbing(const at::Tensor & self) {
14579   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
14580   auto maybe_layer = maybeCurrentDynamicLayer();
14581   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
14582   int64_t cur_level = maybe_layer->layerId();
14583   if (!isBatchedAtLevel(self, cur_level)) {
14584     return at::_ops::lift_fresh_copy::call(self);
14585   }
14586   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
14587   auto results = batch_rule(self_value, self_bdim);
14588   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
14589 }
14590 template <typename batch_rule_t, batch_rule_t batch_rule>
14591 at::Tensor & masked_fill__Scalar_generated_plumbing(at::Tensor & self, const at::Tensor & mask, const at::Scalar & value) {
14592   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
14593   auto maybe_layer = maybeCurrentDynamicLayer();
14594   vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
14595   int64_t cur_level = maybe_layer->layerId();
14596   if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(mask, cur_level)) {
14597     return at::_ops::masked_fill__Scalar::call(self, mask, value);
14598   }
14599   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
14600   auto [mask_value, mask_bdim] = unwrapTensorAtLevel(mask, cur_level);
14601   batch_rule(self_value, self_bdim, mask_value, mask_bdim, value);
14602   return self;
14603 }
14604 template <typename batch_rule_t, batch_rule_t batch_rule>
14605 at::Tensor masked_fill_Scalar_generated_plumbing(const at::Tensor & self, const at::Tensor & mask, const at::Scalar & value) {
14606   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
14607   auto maybe_layer = maybeCurrentDynamicLayer();
14608   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
14609   int64_t cur_level = maybe_layer->layerId();
14610   if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(mask, cur_level)) {
14611     return at::_ops::masked_fill_Scalar::call(self, mask, value);
14612   }
14613   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
14614   auto [mask_value, mask_bdim] = unwrapTensorAtLevel(mask, cur_level);
14615   auto results = batch_rule(self_value, self_bdim, mask_value, mask_bdim, value);
14616   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
14617 }
14618 template <typename batch_rule_t, batch_rule_t batch_rule>
14619 at::Tensor & masked_fill__Tensor_generated_plumbing(at::Tensor & self, const at::Tensor & mask, const at::Tensor & value) {
14620   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
14621   auto maybe_layer = maybeCurrentDynamicLayer();
14622   vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
14623   int64_t cur_level = maybe_layer->layerId();
14624   if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(mask, cur_level) && !isBatchedAtLevel(value, cur_level)) {
14625     return at::_ops::masked_fill__Tensor::call(self, mask, value);
14626   }
14627   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
14628   auto [mask_value, mask_bdim] = unwrapTensorAtLevel(mask, cur_level);
14629   auto [value_value, value_bdim] = unwrapTensorAtLevel(value, cur_level);
14630   batch_rule(self_value, self_bdim, mask_value, mask_bdim, value_value, value_bdim);
14631   return self;
14632 }
14633 template <typename batch_rule_t, batch_rule_t batch_rule>
14634 at::Tensor masked_fill_Tensor_generated_plumbing(const at::Tensor & self, const at::Tensor & mask, const at::Tensor & value) {
14635   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
14636   auto maybe_layer = maybeCurrentDynamicLayer();
14637   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
14638   int64_t cur_level = maybe_layer->layerId();
14639   if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(mask, cur_level) && !isBatchedAtLevel(value, cur_level)) {
14640     return at::_ops::masked_fill_Tensor::call(self, mask, value);
14641   }
14642   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
14643   auto [mask_value, mask_bdim] = unwrapTensorAtLevel(mask, cur_level);
14644   auto [value_value, value_bdim] = unwrapTensorAtLevel(value, cur_level);
14645   auto results = batch_rule(self_value, self_bdim, mask_value, mask_bdim, value_value, value_bdim);
14646   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
14647 }
14648 template <typename batch_rule_t, batch_rule_t batch_rule>
14649 at::Tensor & masked_scatter__generated_plumbing(at::Tensor & self, const at::Tensor & mask, const at::Tensor & source) {
14650   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
14651   auto maybe_layer = maybeCurrentDynamicLayer();
14652   vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
14653   int64_t cur_level = maybe_layer->layerId();
14654   if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(mask, cur_level) && !isBatchedAtLevel(source, cur_level)) {
14655     return at::_ops::masked_scatter_::call(self, mask, source);
14656   }
14657   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
14658   auto [mask_value, mask_bdim] = unwrapTensorAtLevel(mask, cur_level);
14659   auto [source_value, source_bdim] = unwrapTensorAtLevel(source, cur_level);
14660   batch_rule(self_value, self_bdim, mask_value, mask_bdim, source_value, source_bdim);
14661   return self;
14662 }
14663 template <typename batch_rule_t, batch_rule_t batch_rule>
14664 at::Tensor masked_scatter_generated_plumbing(const at::Tensor & self, const at::Tensor & mask, const at::Tensor & source) {
14665   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
14666   auto maybe_layer = maybeCurrentDynamicLayer();
14667   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
14668   int64_t cur_level = maybe_layer->layerId();
14669   if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(mask, cur_level) && !isBatchedAtLevel(source, cur_level)) {
14670     return at::_ops::masked_scatter::call(self, mask, source);
14671   }
14672   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
14673   auto [mask_value, mask_bdim] = unwrapTensorAtLevel(mask, cur_level);
14674   auto [source_value, source_bdim] = unwrapTensorAtLevel(source, cur_level);
14675   auto results = batch_rule(self_value, self_bdim, mask_value, mask_bdim, source_value, source_bdim);
14676   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
14677 }
14678 template <typename batch_rule_t, batch_rule_t batch_rule>
14679 at::Tensor masked_scatter_backward_generated_plumbing(const at::Tensor & grad_output, const at::Tensor & mask, c10::SymIntArrayRef sizes) {
14680   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
14681   auto maybe_layer = maybeCurrentDynamicLayer();
14682   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
14683   int64_t cur_level = maybe_layer->layerId();
14684   if (!isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(mask, cur_level)) {
14685     return at::_ops::masked_scatter_backward::call(grad_output, mask, sizes);
14686   }
14687   auto [grad_output_value, grad_output_bdim] = unwrapTensorAtLevel(grad_output, cur_level);
14688   auto [mask_value, mask_bdim] = unwrapTensorAtLevel(mask, cur_level);
14689   auto results = batch_rule(grad_output_value, grad_output_bdim, mask_value, mask_bdim, sizes);
14690   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
14691 }
14692 template <typename batch_rule_t, batch_rule_t batch_rule>
14693 at::Tensor _masked_softmax_generated_plumbing(const at::Tensor & self, const at::Tensor & mask, ::std::optional<int64_t> dim, ::std::optional<int64_t> mask_type) {
14694   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
14695   auto maybe_layer = maybeCurrentDynamicLayer();
14696   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
14697   int64_t cur_level = maybe_layer->layerId();
14698   if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(mask, cur_level)) {
14699     return at::_ops::_masked_softmax::call(self, mask, dim, mask_type);
14700   }
14701   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
14702   auto [mask_value, mask_bdim] = unwrapTensorAtLevel(mask, cur_level);
14703   auto results = batch_rule(self_value, self_bdim, mask_value, mask_bdim, dim, mask_type);
14704   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
14705 }
14706 template <typename batch_rule_t, batch_rule_t batch_rule>
14707 at::Tensor _masked_softmax_backward_generated_plumbing(const at::Tensor & grad_output, const at::Tensor & output, const at::Tensor & mask, ::std::optional<int64_t> dim) {
14708   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
14709   auto maybe_layer = maybeCurrentDynamicLayer();
14710   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
14711   int64_t cur_level = maybe_layer->layerId();
14712   if (!isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(output, cur_level) && !isBatchedAtLevel(mask, cur_level)) {
14713     return at::_ops::_masked_softmax_backward::call(grad_output, output, mask, dim);
14714   }
14715   auto [grad_output_value, grad_output_bdim] = unwrapTensorAtLevel(grad_output, cur_level);
14716   auto [output_value, output_bdim] = unwrapTensorAtLevel(output, cur_level);
14717   auto [mask_value, mask_bdim] = unwrapTensorAtLevel(mask, cur_level);
14718   auto results = batch_rule(grad_output_value, grad_output_bdim, output_value, output_bdim, mask_value, mask_bdim, dim);
14719   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
14720 }
14721 template <typename batch_rule_t, batch_rule_t batch_rule>
14722 at::Tensor view_generated_plumbing(const at::Tensor & self, c10::SymIntArrayRef size) {
14723   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
14724   auto maybe_layer = maybeCurrentDynamicLayer();
14725   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
14726   int64_t cur_level = maybe_layer->layerId();
14727   if (!isBatchedAtLevel(self, cur_level)) {
14728     return at::_ops::view::call(self, size);
14729   }
14730   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
14731   auto results = batch_rule(self_value, self_bdim, size);
14732   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
14733 }
14734 template <typename batch_rule_t, batch_rule_t batch_rule>
14735 at::Tensor view_dtype_generated_plumbing(const at::Tensor & self, at::ScalarType dtype) {
14736   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
14737   auto maybe_layer = maybeCurrentDynamicLayer();
14738   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
14739   int64_t cur_level = maybe_layer->layerId();
14740   if (!isBatchedAtLevel(self, cur_level)) {
14741     return at::_ops::view_dtype::call(self, dtype);
14742   }
14743   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
14744   auto results = batch_rule(self_value, self_bdim, dtype);
14745   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
14746 }
14747 template <typename batch_rule_t, batch_rule_t batch_rule>
14748 at::Tensor & put__generated_plumbing(at::Tensor & self, const at::Tensor & index, const at::Tensor & source, bool accumulate) {
14749   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
14750   auto maybe_layer = maybeCurrentDynamicLayer();
14751   vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
14752   int64_t cur_level = maybe_layer->layerId();
14753   if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(index, cur_level) && !isBatchedAtLevel(source, cur_level)) {
14754     return at::_ops::put_::call(self, index, source, accumulate);
14755   }
14756   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
14757   auto [index_value, index_bdim] = unwrapTensorAtLevel(index, cur_level);
14758   auto [source_value, source_bdim] = unwrapTensorAtLevel(source, cur_level);
14759   batch_rule(self_value, self_bdim, index_value, index_bdim, source_value, source_bdim, accumulate);
14760   return self;
14761 }
14762 template <typename batch_rule_t, batch_rule_t batch_rule>
14763 at::Tensor put_generated_plumbing(const at::Tensor & self, const at::Tensor & index, const at::Tensor & source, bool accumulate) {
14764   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
14765   auto maybe_layer = maybeCurrentDynamicLayer();
14766   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
14767   int64_t cur_level = maybe_layer->layerId();
14768   if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(index, cur_level) && !isBatchedAtLevel(source, cur_level)) {
14769     return at::_ops::put::call(self, index, source, accumulate);
14770   }
14771   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
14772   auto [index_value, index_bdim] = unwrapTensorAtLevel(index, cur_level);
14773   auto [source_value, source_bdim] = unwrapTensorAtLevel(source, cur_level);
14774   auto results = batch_rule(self_value, self_bdim, index_value, index_bdim, source_value, source_bdim, accumulate);
14775   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
14776 }
14777 template <typename batch_rule_t, batch_rule_t batch_rule>
14778 at::Tensor & index_add__generated_plumbing(at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & source, const at::Scalar & alpha) {
14779   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
14780   auto maybe_layer = maybeCurrentDynamicLayer();
14781   vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
14782   int64_t cur_level = maybe_layer->layerId();
14783   if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(index, cur_level) && !isBatchedAtLevel(source, cur_level)) {
14784     return at::_ops::index_add_::call(self, dim, index, source, alpha);
14785   }
14786   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
14787   auto [index_value, index_bdim] = unwrapTensorAtLevel(index, cur_level);
14788   auto [source_value, source_bdim] = unwrapTensorAtLevel(source, cur_level);
14789   batch_rule(self_value, self_bdim, dim, index_value, index_bdim, source_value, source_bdim, alpha);
14790   return self;
14791 }
14792 template <typename batch_rule_t, batch_rule_t batch_rule>
14793 at::Tensor index_add_generated_plumbing(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & source, const at::Scalar & alpha) {
14794   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
14795   auto maybe_layer = maybeCurrentDynamicLayer();
14796   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
14797   int64_t cur_level = maybe_layer->layerId();
14798   if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(index, cur_level) && !isBatchedAtLevel(source, cur_level)) {
14799     return at::_ops::index_add::call(self, dim, index, source, alpha);
14800   }
14801   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
14802   auto [index_value, index_bdim] = unwrapTensorAtLevel(index, cur_level);
14803   auto [source_value, source_bdim] = unwrapTensorAtLevel(source, cur_level);
14804   auto results = batch_rule(self_value, self_bdim, dim, index_value, index_bdim, source_value, source_bdim, alpha);
14805   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
14806 }
14807 template <typename batch_rule_t, batch_rule_t batch_rule>
14808 at::Tensor index_add_dimname_generated_plumbing(const at::Tensor & self, at::Dimname dim, const at::Tensor & index, const at::Tensor & source, const at::Scalar & alpha) {
14809   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
14810   auto maybe_layer = maybeCurrentDynamicLayer();
14811   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
14812   int64_t cur_level = maybe_layer->layerId();
14813   if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(index, cur_level) && !isBatchedAtLevel(source, cur_level)) {
14814     return at::_ops::index_add_dimname::call(self, dim, index, source, alpha);
14815   }
14816   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
14817   auto [index_value, index_bdim] = unwrapTensorAtLevel(index, cur_level);
14818   auto [source_value, source_bdim] = unwrapTensorAtLevel(source, cur_level);
14819   auto results = batch_rule(self_value, self_bdim, dim, index_value, index_bdim, source_value, source_bdim, alpha);
14820   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
14821 }
14822 template <typename batch_rule_t, batch_rule_t batch_rule>
14823 at::Tensor & index_reduce__generated_plumbing(at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & source, c10::string_view reduce, bool include_self) {
14824   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
14825   auto maybe_layer = maybeCurrentDynamicLayer();
14826   vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
14827   int64_t cur_level = maybe_layer->layerId();
14828   if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(index, cur_level) && !isBatchedAtLevel(source, cur_level)) {
14829     return at::_ops::index_reduce_::call(self, dim, index, source, reduce, include_self);
14830   }
14831   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
14832   auto [index_value, index_bdim] = unwrapTensorAtLevel(index, cur_level);
14833   auto [source_value, source_bdim] = unwrapTensorAtLevel(source, cur_level);
14834   batch_rule(self_value, self_bdim, dim, index_value, index_bdim, source_value, source_bdim, reduce, include_self);
14835   return self;
14836 }
14837 template <typename batch_rule_t, batch_rule_t batch_rule>
14838 at::Tensor index_reduce_generated_plumbing(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & source, c10::string_view reduce, bool include_self) {
14839   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
14840   auto maybe_layer = maybeCurrentDynamicLayer();
14841   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
14842   int64_t cur_level = maybe_layer->layerId();
14843   if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(index, cur_level) && !isBatchedAtLevel(source, cur_level)) {
14844     return at::_ops::index_reduce::call(self, dim, index, source, reduce, include_self);
14845   }
14846   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
14847   auto [index_value, index_bdim] = unwrapTensorAtLevel(index, cur_level);
14848   auto [source_value, source_bdim] = unwrapTensorAtLevel(source, cur_level);
14849   auto results = batch_rule(self_value, self_bdim, dim, index_value, index_bdim, source_value, source_bdim, reduce, include_self);
14850   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
14851 }
14852 template <typename batch_rule_t, batch_rule_t batch_rule>
14853 at::Tensor & index_fill__int_Scalar_generated_plumbing(at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Scalar & value) {
14854   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
14855   auto maybe_layer = maybeCurrentDynamicLayer();
14856   vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
14857   int64_t cur_level = maybe_layer->layerId();
14858   if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(index, cur_level)) {
14859     return at::_ops::index_fill__int_Scalar::call(self, dim, index, value);
14860   }
14861   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
14862   auto [index_value, index_bdim] = unwrapTensorAtLevel(index, cur_level);
14863   batch_rule(self_value, self_bdim, dim, index_value, index_bdim, value);
14864   return self;
14865 }
// vmap plumbing for aten::index_fill.int_Scalar (functional). Shared pattern:
// exclude the FuncTorchBatched key so inner dispatch does not re-enter vmap,
// read the current dynamic-layer level, fall through to the plain ATen op when
// no tensor argument is batched at that level, otherwise unwrap each tensor
// into (value, batch-dim), run the batch rule, and re-wrap the result.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor index_fill_int_Scalar_generated_plumbing(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Scalar & value) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  // Fast path: nothing batched at this level -> dispatch straight to ATen.
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(index, cur_level)) {
    return at::_ops::index_fill_int_Scalar::call(self, dim, index, value);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto [index_value, index_bdim] = unwrapTensorAtLevel(index, cur_level);
  auto results = batch_rule(self_value, self_bdim, dim, index_value, index_bdim, value);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// vmap plumbing for aten::index_fill_.int_Tensor (in-place): the batch rule's
// return value is discarded and the original `self` reference is returned.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor & index_fill__int_Tensor_generated_plumbing(at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & value) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(index, cur_level) && !isBatchedAtLevel(value, cur_level)) {
    return at::_ops::index_fill__int_Tensor::call(self, dim, index, value);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto [index_value, index_bdim] = unwrapTensorAtLevel(index, cur_level);
  auto [value_value, value_bdim] = unwrapTensorAtLevel(value, cur_level);
  batch_rule(self_value, self_bdim, dim, index_value, index_bdim, value_value, value_bdim);
  return self;
}
// vmap plumbing for aten::index_fill.int_Tensor (functional).
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor index_fill_int_Tensor_generated_plumbing(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & value) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(index, cur_level) && !isBatchedAtLevel(value, cur_level)) {
    return at::_ops::index_fill_int_Tensor::call(self, dim, index, value);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto [index_value, index_bdim] = unwrapTensorAtLevel(index, cur_level);
  auto [value_value, value_bdim] = unwrapTensorAtLevel(value, cur_level);
  auto results = batch_rule(self_value, self_bdim, dim, index_value, index_bdim, value_value, value_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// vmap plumbing for aten::index_fill_.Dimname_Scalar (in-place).
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor & index_fill__Dimname_Scalar_generated_plumbing(at::Tensor & self, at::Dimname dim, const at::Tensor & index, const at::Scalar & value) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(index, cur_level)) {
    return at::_ops::index_fill__Dimname_Scalar::call(self, dim, index, value);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto [index_value, index_bdim] = unwrapTensorAtLevel(index, cur_level);
  batch_rule(self_value, self_bdim, dim, index_value, index_bdim, value);
  return self;
}
// vmap plumbing for aten::index_fill_.Dimname_Tensor (in-place).
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor & index_fill__Dimname_Tensor_generated_plumbing(at::Tensor & self, at::Dimname dim, const at::Tensor & index, const at::Tensor & value) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(index, cur_level) && !isBatchedAtLevel(value, cur_level)) {
    return at::_ops::index_fill__Dimname_Tensor::call(self, dim, index, value);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto [index_value, index_bdim] = unwrapTensorAtLevel(index, cur_level);
  auto [value_value, value_bdim] = unwrapTensorAtLevel(value, cur_level);
  batch_rule(self_value, self_bdim, dim, index_value, index_bdim, value_value, value_bdim);
  return self;
}
// vmap plumbing for aten::index_fill.Dimname_Scalar (functional).
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor index_fill_Dimname_Scalar_generated_plumbing(const at::Tensor & self, at::Dimname dim, const at::Tensor & index, const at::Scalar & value) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(index, cur_level)) {
    return at::_ops::index_fill_Dimname_Scalar::call(self, dim, index, value);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto [index_value, index_bdim] = unwrapTensorAtLevel(index, cur_level);
  auto results = batch_rule(self_value, self_bdim, dim, index_value, index_bdim, value);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// vmap plumbing for aten::index_fill.Dimname_Tensor (functional).
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor index_fill_Dimname_Tensor_generated_plumbing(const at::Tensor & self, at::Dimname dim, const at::Tensor & index, const at::Tensor & value) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(index, cur_level) && !isBatchedAtLevel(value, cur_level)) {
    return at::_ops::index_fill_Dimname_Tensor::call(self, dim, index, value);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto [index_value, index_bdim] = unwrapTensorAtLevel(index, cur_level);
  auto [value_value, value_bdim] = unwrapTensorAtLevel(value, cur_level);
  auto results = batch_rule(self_value, self_bdim, dim, index_value, index_bdim, value_value, value_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// vmap plumbing for aten::scatter.src (functional): unwrap batched tensor
// args at the current dynamic-layer level, run the batch rule, re-wrap.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor scatter_src_generated_plumbing(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & src) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  // Fast path: nothing batched at this level -> dispatch straight to ATen.
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(index, cur_level) && !isBatchedAtLevel(src, cur_level)) {
    return at::_ops::scatter_src::call(self, dim, index, src);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto [index_value, index_bdim] = unwrapTensorAtLevel(index, cur_level);
  auto [src_value, src_bdim] = unwrapTensorAtLevel(src, cur_level);
  auto results = batch_rule(self_value, self_bdim, dim, index_value, index_bdim, src_value, src_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// vmap plumbing for aten::scatter_.src (in-place): the batch rule's return
// value is discarded and the original `self` reference is returned.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor & scatter__src_generated_plumbing(at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & src) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(index, cur_level) && !isBatchedAtLevel(src, cur_level)) {
    return at::_ops::scatter__src::call(self, dim, index, src);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto [index_value, index_bdim] = unwrapTensorAtLevel(index, cur_level);
  auto [src_value, src_bdim] = unwrapTensorAtLevel(src, cur_level);
  batch_rule(self_value, self_bdim, dim, index_value, index_bdim, src_value, src_bdim);
  return self;
}
// vmap plumbing for aten::scatter.value (functional); `value` is a Scalar and
// is forwarded unchanged (only Tensor args are unwrapped).
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor scatter_value_generated_plumbing(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Scalar & value) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(index, cur_level)) {
    return at::_ops::scatter_value::call(self, dim, index, value);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto [index_value, index_bdim] = unwrapTensorAtLevel(index, cur_level);
  auto results = batch_rule(self_value, self_bdim, dim, index_value, index_bdim, value);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// vmap plumbing for aten::scatter_.value (in-place).
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor & scatter__value_generated_plumbing(at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Scalar & value) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(index, cur_level)) {
    return at::_ops::scatter__value::call(self, dim, index, value);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto [index_value, index_bdim] = unwrapTensorAtLevel(index, cur_level);
  batch_rule(self_value, self_bdim, dim, index_value, index_bdim, value);
  return self;
}
// vmap plumbing for aten::scatter.reduce (functional): unwrap batched tensor
// args at the current level, forward the `reduce` string unchanged, re-wrap.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor scatter_reduce_generated_plumbing(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & src, c10::string_view reduce) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  // Fast path: nothing batched at this level -> dispatch straight to ATen.
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(index, cur_level) && !isBatchedAtLevel(src, cur_level)) {
    return at::_ops::scatter_reduce::call(self, dim, index, src, reduce);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto [index_value, index_bdim] = unwrapTensorAtLevel(index, cur_level);
  auto [src_value, src_bdim] = unwrapTensorAtLevel(src, cur_level);
  auto results = batch_rule(self_value, self_bdim, dim, index_value, index_bdim, src_value, src_bdim, reduce);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// vmap plumbing for aten::scatter_.reduce (in-place): batch rule return is
// discarded; the original `self` reference is returned.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor & scatter__reduce_generated_plumbing(at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & src, c10::string_view reduce) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(index, cur_level) && !isBatchedAtLevel(src, cur_level)) {
    return at::_ops::scatter__reduce::call(self, dim, index, src, reduce);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto [index_value, index_bdim] = unwrapTensorAtLevel(index, cur_level);
  auto [src_value, src_bdim] = unwrapTensorAtLevel(src, cur_level);
  batch_rule(self_value, self_bdim, dim, index_value, index_bdim, src_value, src_bdim, reduce);
  return self;
}
// vmap plumbing for aten::scatter.value_reduce (functional); Scalar `value`
// and string `reduce` are forwarded unchanged.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor scatter_value_reduce_generated_plumbing(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Scalar & value, c10::string_view reduce) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(index, cur_level)) {
    return at::_ops::scatter_value_reduce::call(self, dim, index, value, reduce);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto [index_value, index_bdim] = unwrapTensorAtLevel(index, cur_level);
  auto results = batch_rule(self_value, self_bdim, dim, index_value, index_bdim, value, reduce);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// vmap plumbing for aten::scatter_.value_reduce (in-place).
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor & scatter__value_reduce_generated_plumbing(at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Scalar & value, c10::string_view reduce) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(index, cur_level)) {
    return at::_ops::scatter__value_reduce::call(self, dim, index, value, reduce);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto [index_value, index_bdim] = unwrapTensorAtLevel(index, cur_level);
  batch_rule(self_value, self_bdim, dim, index_value, index_bdim, value, reduce);
  return self;
}
// vmap plumbing for aten::scatter.dimname_src (functional): unwrap batched
// tensor args at the current level, run the batch rule, re-wrap the result.
// `dim` is an at::Dimname and is forwarded unchanged.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor scatter_dimname_src_generated_plumbing(const at::Tensor & self, at::Dimname dim, const at::Tensor & index, const at::Tensor & src) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  // Fast path: nothing batched at this level -> dispatch straight to ATen.
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(index, cur_level) && !isBatchedAtLevel(src, cur_level)) {
    return at::_ops::scatter_dimname_src::call(self, dim, index, src);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto [index_value, index_bdim] = unwrapTensorAtLevel(index, cur_level);
  auto [src_value, src_bdim] = unwrapTensorAtLevel(src, cur_level);
  auto results = batch_rule(self_value, self_bdim, dim, index_value, index_bdim, src_value, src_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// vmap plumbing for aten::scatter.dimname_value (functional).
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor scatter_dimname_value_generated_plumbing(const at::Tensor & self, at::Dimname dim, const at::Tensor & index, const at::Scalar & value) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(index, cur_level)) {
    return at::_ops::scatter_dimname_value::call(self, dim, index, value);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto [index_value, index_bdim] = unwrapTensorAtLevel(index, cur_level);
  auto results = batch_rule(self_value, self_bdim, dim, index_value, index_bdim, value);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// vmap plumbing for aten::scatter_add (functional): unwrap batched tensor
// args at the current dynamic-layer level, run the batch rule, re-wrap.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor scatter_add_generated_plumbing(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & src) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  // Fast path: nothing batched at this level -> dispatch straight to ATen.
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(index, cur_level) && !isBatchedAtLevel(src, cur_level)) {
    return at::_ops::scatter_add::call(self, dim, index, src);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto [index_value, index_bdim] = unwrapTensorAtLevel(index, cur_level);
  auto [src_value, src_bdim] = unwrapTensorAtLevel(src, cur_level);
  auto results = batch_rule(self_value, self_bdim, dim, index_value, index_bdim, src_value, src_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// vmap plumbing for aten::scatter_add_ (in-place): batch rule return is
// discarded; the original `self` reference is returned.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor & scatter_add__generated_plumbing(at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & src) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(index, cur_level) && !isBatchedAtLevel(src, cur_level)) {
    return at::_ops::scatter_add_::call(self, dim, index, src);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto [index_value, index_bdim] = unwrapTensorAtLevel(index, cur_level);
  auto [src_value, src_bdim] = unwrapTensorAtLevel(src, cur_level);
  batch_rule(self_value, self_bdim, dim, index_value, index_bdim, src_value, src_bdim);
  return self;
}
// vmap plumbing for aten::scatter_add.dimname (functional).
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor scatter_add_dimname_generated_plumbing(const at::Tensor & self, at::Dimname dim, const at::Tensor & index, const at::Tensor & src) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(index, cur_level) && !isBatchedAtLevel(src, cur_level)) {
    return at::_ops::scatter_add_dimname::call(self, dim, index, src);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto [index_value, index_bdim] = unwrapTensorAtLevel(index, cur_level);
  auto [src_value, src_bdim] = unwrapTensorAtLevel(src, cur_level);
  auto results = batch_rule(self_value, self_bdim, dim, index_value, index_bdim, src_value, src_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// vmap plumbing for aten::scatter_reduce.two (functional): unwrap batched
// tensor args at the current level; `reduce` and `include_self` pass through.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor scatter_reduce_two_generated_plumbing(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & src, c10::string_view reduce, bool include_self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  // Fast path: nothing batched at this level -> dispatch straight to ATen.
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(index, cur_level) && !isBatchedAtLevel(src, cur_level)) {
    return at::_ops::scatter_reduce_two::call(self, dim, index, src, reduce, include_self);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto [index_value, index_bdim] = unwrapTensorAtLevel(index, cur_level);
  auto [src_value, src_bdim] = unwrapTensorAtLevel(src, cur_level);
  auto results = batch_rule(self_value, self_bdim, dim, index_value, index_bdim, src_value, src_bdim, reduce, include_self);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// vmap plumbing for aten::scatter_reduce_.two (in-place): batch rule return
// is discarded; the original `self` reference is returned.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor & scatter_reduce__two_generated_plumbing(at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & src, c10::string_view reduce, bool include_self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(index, cur_level) && !isBatchedAtLevel(src, cur_level)) {
    return at::_ops::scatter_reduce__two::call(self, dim, index, src, reduce, include_self);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto [index_value, index_bdim] = unwrapTensorAtLevel(index, cur_level);
  auto [src_value, src_bdim] = unwrapTensorAtLevel(src, cur_level);
  batch_rule(self_value, self_bdim, dim, index_value, index_bdim, src_value, src_bdim, reduce, include_self);
  return self;
}
// vmap plumbing for aten::eq_.Scalar (in-place): unwrap `self` at the current
// dynamic-layer level, run the batch rule (return discarded), return `self`.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor & eq__Scalar_generated_plumbing(at::Tensor & self, const at::Scalar & other) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  // Fast path: nothing batched at this level -> dispatch straight to ATen.
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::eq__Scalar::call(self, other);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  batch_rule(self_value, self_bdim, other);
  return self;
}
// vmap plumbing for aten::eq_.Tensor (in-place): both tensor args unwrapped.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor & eq__Tensor_generated_plumbing(at::Tensor & self, const at::Tensor & other) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
    return at::_ops::eq__Tensor::call(self, other);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto [other_value, other_bdim] = unwrapTensorAtLevel(other, cur_level);
  batch_rule(self_value, self_bdim, other_value, other_bdim);
  return self;
}
// vmap plumbing for aten::bitwise_and.Scalar (functional): unwrap `self` at
// the current dynamic-layer level, run the batch rule, re-wrap the result.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor bitwise_and_Scalar_generated_plumbing(const at::Tensor & self, const at::Scalar & other) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  // Fast path: nothing batched at this level -> dispatch straight to ATen.
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::bitwise_and_Scalar::call(self, other);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, other);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// vmap plumbing for aten::bitwise_and.Scalar_Tensor (functional): here only
// `other` is a Tensor, so only it is checked and unwrapped.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor bitwise_and_Scalar_Tensor_generated_plumbing(const at::Scalar & self, const at::Tensor & other) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(other, cur_level)) {
    return at::_ops::bitwise_and_Scalar_Tensor::call(self, other);
  }
  auto [other_value, other_bdim] = unwrapTensorAtLevel(other, cur_level);
  auto results = batch_rule(self, other_value, other_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// vmap plumbing for aten::bitwise_and.Tensor (functional).
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor bitwise_and_Tensor_generated_plumbing(const at::Tensor & self, const at::Tensor & other) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
    return at::_ops::bitwise_and_Tensor::call(self, other);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto [other_value, other_bdim] = unwrapTensorAtLevel(other, cur_level);
  auto results = batch_rule(self_value, self_bdim, other_value, other_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// vmap plumbing for aten::bitwise_and_.Scalar (in-place): batch rule return
// is discarded; the original `self` reference is returned.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor & bitwise_and__Scalar_generated_plumbing(at::Tensor & self, const at::Scalar & other) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::bitwise_and__Scalar::call(self, other);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  batch_rule(self_value, self_bdim, other);
  return self;
}
// vmap plumbing for aten::bitwise_and_.Tensor (in-place).
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor & bitwise_and__Tensor_generated_plumbing(at::Tensor & self, const at::Tensor & other) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
    return at::_ops::bitwise_and__Tensor::call(self, other);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto [other_value, other_bdim] = unwrapTensorAtLevel(other, cur_level);
  batch_rule(self_value, self_bdim, other_value, other_bdim);
  return self;
}
// vmap plumbing for aten::__and__.Scalar (functional operator form): unwrap
// `self` at the current dynamic-layer level, run the batch rule, re-wrap.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor __and___Scalar_generated_plumbing(const at::Tensor & self, const at::Scalar & other) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  // Fast path: nothing batched at this level -> dispatch straight to ATen.
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::__and___Scalar::call(self, other);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, other);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// vmap plumbing for aten::__and__.Tensor (functional operator form).
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor __and___Tensor_generated_plumbing(const at::Tensor & self, const at::Tensor & other) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
    return at::_ops::__and___Tensor::call(self, other);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto [other_value, other_bdim] = unwrapTensorAtLevel(other, cur_level);
  auto results = batch_rule(self_value, self_bdim, other_value, other_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// vmap plumbing for aten::__iand__.Scalar (in-place operator form): batch
// rule return is discarded; the original `self` reference is returned.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor & __iand___Scalar_generated_plumbing(at::Tensor & self, const at::Scalar & other) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::__iand___Scalar::call(self, other);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  batch_rule(self_value, self_bdim, other);
  return self;
}
// vmap plumbing for aten::__iand__.Tensor (in-place operator form).
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor & __iand___Tensor_generated_plumbing(at::Tensor & self, const at::Tensor & other) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
    return at::_ops::__iand___Tensor::call(self, other);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto [other_value, other_bdim] = unwrapTensorAtLevel(other, cur_level);
  batch_rule(self_value, self_bdim, other_value, other_bdim);
  return self;
}
15336 template <typename batch_rule_t, batch_rule_t batch_rule>
15337 at::Tensor bitwise_or_Scalar_generated_plumbing(const at::Tensor & self, const at::Scalar & other) {
15338   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
15339   auto maybe_layer = maybeCurrentDynamicLayer();
15340   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
15341   int64_t cur_level = maybe_layer->layerId();
15342   if (!isBatchedAtLevel(self, cur_level)) {
15343     return at::_ops::bitwise_or_Scalar::call(self, other);
15344   }
15345   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
15346   auto results = batch_rule(self_value, self_bdim, other);
15347   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
15348 }
template <typename batch_rule_t, batch_rule_t batch_rule>
// vmap plumbing for at::_ops::bitwise_or_Scalar_Tensor: only `other` is a
// tensor, so only it is unwrapped/checked at the current vmap level.
at::Tensor bitwise_or_Scalar_Tensor_generated_plumbing(const at::Scalar & self, const at::Tensor & other) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  // Fast path: nothing is batched at this level; call the op directly.
  if (!isBatchedAtLevel(other, cur_level)) {
    return at::_ops::bitwise_or_Scalar_Tensor::call(self, other);
  }
  auto [other_value, other_bdim] = unwrapTensorAtLevel(other, cur_level);
  auto results = batch_rule(self, other_value, other_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
// vmap plumbing for at::_ops::bitwise_or_Tensor: unwrap both tensor inputs,
// invoke the batch rule, and re-wrap the (value, bdim) result.
at::Tensor bitwise_or_Tensor_generated_plumbing(const at::Tensor & self, const at::Tensor & other) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  // Fast path: neither input is batched at this level; call the op directly.
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
    return at::_ops::bitwise_or_Tensor::call(self, other);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto [other_value, other_bdim] = unwrapTensorAtLevel(other, cur_level);
  auto results = batch_rule(self_value, self_bdim, other_value, other_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
// In-place vmap plumbing for at::_ops::bitwise_or__Scalar: the batch rule runs
// on the unwrapped value and the original `self` wrapper is returned as-is.
at::Tensor & bitwise_or__Scalar_generated_plumbing(at::Tensor & self, const at::Scalar & other) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  // Fast path: nothing is batched at this level; call the in-place op directly.
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::bitwise_or__Scalar::call(self, other);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  // Result is discarded: in-place ops mutate self via self_value.
  batch_rule(self_value, self_bdim, other);
  return self;
}
template <typename batch_rule_t, batch_rule_t batch_rule>
// In-place vmap plumbing for at::_ops::bitwise_or__Tensor: unwrap both inputs,
// run the batch rule on the unwrapped values, and return the original `self`.
at::Tensor & bitwise_or__Tensor_generated_plumbing(at::Tensor & self, const at::Tensor & other) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  // Fast path: neither input is batched at this level; call the op directly.
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
    return at::_ops::bitwise_or__Tensor::call(self, other);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto [other_value, other_bdim] = unwrapTensorAtLevel(other, cur_level);
  batch_rule(self_value, self_bdim, other_value, other_bdim);
  return self;
}
template <typename batch_rule_t, batch_rule_t batch_rule>
// vmap plumbing for at::_ops::__or___Scalar: unwrap `self` at the current
// dynamic layer, invoke the batch rule, and re-wrap the (value, bdim) result.
at::Tensor __or___Scalar_generated_plumbing(const at::Tensor & self, const at::Scalar & other) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  // Fast path: nothing is batched at this level; call the op directly.
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::__or___Scalar::call(self, other);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, other);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
// vmap plumbing for at::_ops::__or___Tensor: unwrap both tensor inputs,
// invoke the batch rule, and re-wrap the (value, bdim) result.
at::Tensor __or___Tensor_generated_plumbing(const at::Tensor & self, const at::Tensor & other) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  // Fast path: neither input is batched at this level; call the op directly.
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
    return at::_ops::__or___Tensor::call(self, other);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto [other_value, other_bdim] = unwrapTensorAtLevel(other, cur_level);
  auto results = batch_rule(self_value, self_bdim, other_value, other_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
// In-place vmap plumbing for at::_ops::__ior___Scalar: the batch rule runs on
// the unwrapped value and the original `self` wrapper is returned as-is.
at::Tensor & __ior___Scalar_generated_plumbing(at::Tensor & self, const at::Scalar & other) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  // Fast path: nothing is batched at this level; call the in-place op directly.
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::__ior___Scalar::call(self, other);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  batch_rule(self_value, self_bdim, other);
  return self;
}
template <typename batch_rule_t, batch_rule_t batch_rule>
// In-place vmap plumbing for at::_ops::__ior___Tensor: unwrap both inputs,
// run the batch rule on the unwrapped values, and return the original `self`.
at::Tensor & __ior___Tensor_generated_plumbing(at::Tensor & self, const at::Tensor & other) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  // Fast path: neither input is batched at this level; call the op directly.
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
    return at::_ops::__ior___Tensor::call(self, other);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto [other_value, other_bdim] = unwrapTensorAtLevel(other, cur_level);
  batch_rule(self_value, self_bdim, other_value, other_bdim);
  return self;
}
template <typename batch_rule_t, batch_rule_t batch_rule>
// vmap plumbing for at::_ops::bitwise_xor_Scalar: unwrap `self` at the current
// dynamic layer, invoke the batch rule, and re-wrap the (value, bdim) result.
at::Tensor bitwise_xor_Scalar_generated_plumbing(const at::Tensor & self, const at::Scalar & other) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  // Fast path: nothing is batched at this level; call the op directly.
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::bitwise_xor_Scalar::call(self, other);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, other);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
// vmap plumbing for at::_ops::bitwise_xor_Scalar_Tensor: only `other` is a
// tensor, so only it is unwrapped/checked at the current vmap level.
at::Tensor bitwise_xor_Scalar_Tensor_generated_plumbing(const at::Scalar & self, const at::Tensor & other) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  // Fast path: nothing is batched at this level; call the op directly.
  if (!isBatchedAtLevel(other, cur_level)) {
    return at::_ops::bitwise_xor_Scalar_Tensor::call(self, other);
  }
  auto [other_value, other_bdim] = unwrapTensorAtLevel(other, cur_level);
  auto results = batch_rule(self, other_value, other_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
// vmap plumbing for at::_ops::bitwise_xor_Tensor: unwrap both tensor inputs,
// invoke the batch rule, and re-wrap the (value, bdim) result.
at::Tensor bitwise_xor_Tensor_generated_plumbing(const at::Tensor & self, const at::Tensor & other) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  // Fast path: neither input is batched at this level; call the op directly.
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
    return at::_ops::bitwise_xor_Tensor::call(self, other);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto [other_value, other_bdim] = unwrapTensorAtLevel(other, cur_level);
  auto results = batch_rule(self_value, self_bdim, other_value, other_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
// In-place vmap plumbing for at::_ops::bitwise_xor__Scalar: the batch rule runs
// on the unwrapped value and the original `self` wrapper is returned as-is.
at::Tensor & bitwise_xor__Scalar_generated_plumbing(at::Tensor & self, const at::Scalar & other) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  // Fast path: nothing is batched at this level; call the in-place op directly.
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::bitwise_xor__Scalar::call(self, other);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  batch_rule(self_value, self_bdim, other);
  return self;
}
template <typename batch_rule_t, batch_rule_t batch_rule>
// In-place vmap plumbing for at::_ops::bitwise_xor__Tensor: unwrap both inputs,
// run the batch rule on the unwrapped values, and return the original `self`.
at::Tensor & bitwise_xor__Tensor_generated_plumbing(at::Tensor & self, const at::Tensor & other) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  // Fast path: neither input is batched at this level; call the op directly.
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
    return at::_ops::bitwise_xor__Tensor::call(self, other);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto [other_value, other_bdim] = unwrapTensorAtLevel(other, cur_level);
  batch_rule(self_value, self_bdim, other_value, other_bdim);
  return self;
}
template <typename batch_rule_t, batch_rule_t batch_rule>
// vmap plumbing for at::_ops::__xor___Scalar: unwrap `self` at the current
// dynamic layer, invoke the batch rule, and re-wrap the (value, bdim) result.
at::Tensor __xor___Scalar_generated_plumbing(const at::Tensor & self, const at::Scalar & other) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  // Fast path: nothing is batched at this level; call the op directly.
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::__xor___Scalar::call(self, other);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, other);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
// vmap plumbing for at::_ops::__xor___Tensor: unwrap both tensor inputs,
// invoke the batch rule, and re-wrap the (value, bdim) result.
at::Tensor __xor___Tensor_generated_plumbing(const at::Tensor & self, const at::Tensor & other) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  // Fast path: neither input is batched at this level; call the op directly.
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
    return at::_ops::__xor___Tensor::call(self, other);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto [other_value, other_bdim] = unwrapTensorAtLevel(other, cur_level);
  auto results = batch_rule(self_value, self_bdim, other_value, other_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
// In-place vmap plumbing for at::_ops::__ixor___Scalar: the batch rule runs on
// the unwrapped value and the original `self` wrapper is returned as-is.
at::Tensor & __ixor___Scalar_generated_plumbing(at::Tensor & self, const at::Scalar & other) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  // Fast path: nothing is batched at this level; call the in-place op directly.
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::__ixor___Scalar::call(self, other);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  batch_rule(self_value, self_bdim, other);
  return self;
}
template <typename batch_rule_t, batch_rule_t batch_rule>
// In-place vmap plumbing for at::_ops::__ixor___Tensor: unwrap both inputs,
// run the batch rule on the unwrapped values, and return the original `self`.
at::Tensor & __ixor___Tensor_generated_plumbing(at::Tensor & self, const at::Tensor & other) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  // Fast path: neither input is batched at this level; call the op directly.
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
    return at::_ops::__ixor___Tensor::call(self, other);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto [other_value, other_bdim] = unwrapTensorAtLevel(other, cur_level);
  batch_rule(self_value, self_bdim, other_value, other_bdim);
  return self;
}
template <typename batch_rule_t, batch_rule_t batch_rule>
// vmap plumbing for at::_ops::__lshift___Scalar: unwrap `self` at the current
// dynamic layer, invoke the batch rule, and re-wrap the (value, bdim) result.
at::Tensor __lshift___Scalar_generated_plumbing(const at::Tensor & self, const at::Scalar & other) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  // Fast path: nothing is batched at this level; call the op directly.
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::__lshift___Scalar::call(self, other);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, other);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
// vmap plumbing for at::_ops::__lshift___Tensor: unwrap both tensor inputs,
// invoke the batch rule, and re-wrap the (value, bdim) result.
at::Tensor __lshift___Tensor_generated_plumbing(const at::Tensor & self, const at::Tensor & other) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  // Fast path: neither input is batched at this level; call the op directly.
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
    return at::_ops::__lshift___Tensor::call(self, other);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto [other_value, other_bdim] = unwrapTensorAtLevel(other, cur_level);
  auto results = batch_rule(self_value, self_bdim, other_value, other_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
// In-place vmap plumbing for at::_ops::__ilshift___Scalar: the batch rule runs
// on the unwrapped value and the original `self` wrapper is returned as-is.
at::Tensor & __ilshift___Scalar_generated_plumbing(at::Tensor & self, const at::Scalar & other) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  // Fast path: nothing is batched at this level; call the in-place op directly.
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::__ilshift___Scalar::call(self, other);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  batch_rule(self_value, self_bdim, other);
  return self;
}
template <typename batch_rule_t, batch_rule_t batch_rule>
// In-place vmap plumbing for at::_ops::__ilshift___Tensor: unwrap both inputs,
// run the batch rule on the unwrapped values, and return the original `self`.
at::Tensor & __ilshift___Tensor_generated_plumbing(at::Tensor & self, const at::Tensor & other) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  // Fast path: neither input is batched at this level; call the op directly.
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
    return at::_ops::__ilshift___Tensor::call(self, other);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto [other_value, other_bdim] = unwrapTensorAtLevel(other, cur_level);
  batch_rule(self_value, self_bdim, other_value, other_bdim);
  return self;
}
template <typename batch_rule_t, batch_rule_t batch_rule>
// vmap plumbing for at::_ops::bitwise_left_shift_Tensor: unwrap both tensor
// inputs, invoke the batch rule, and re-wrap the (value, bdim) result.
at::Tensor bitwise_left_shift_Tensor_generated_plumbing(const at::Tensor & self, const at::Tensor & other) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  // Fast path: neither input is batched at this level; call the op directly.
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
    return at::_ops::bitwise_left_shift_Tensor::call(self, other);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto [other_value, other_bdim] = unwrapTensorAtLevel(other, cur_level);
  auto results = batch_rule(self_value, self_bdim, other_value, other_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
// In-place vmap plumbing for at::_ops::bitwise_left_shift__Tensor: unwrap both
// inputs, run the batch rule on the unwrapped values, and return `self`.
at::Tensor & bitwise_left_shift__Tensor_generated_plumbing(at::Tensor & self, const at::Tensor & other) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  // Fast path: neither input is batched at this level; call the op directly.
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
    return at::_ops::bitwise_left_shift__Tensor::call(self, other);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto [other_value, other_bdim] = unwrapTensorAtLevel(other, cur_level);
  batch_rule(self_value, self_bdim, other_value, other_bdim);
  return self;
}
template <typename batch_rule_t, batch_rule_t batch_rule>
// vmap plumbing for at::_ops::bitwise_left_shift_Tensor_Scalar: unwrap `self`,
// invoke the batch rule, and re-wrap the (value, bdim) result.
at::Tensor bitwise_left_shift_Tensor_Scalar_generated_plumbing(const at::Tensor & self, const at::Scalar & other) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  // Fast path: nothing is batched at this level; call the op directly.
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::bitwise_left_shift_Tensor_Scalar::call(self, other);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, other);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
// In-place vmap plumbing for at::_ops::bitwise_left_shift__Tensor_Scalar: the
// batch rule runs on the unwrapped value; the original `self` is returned.
at::Tensor & bitwise_left_shift__Tensor_Scalar_generated_plumbing(at::Tensor & self, const at::Scalar & other) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  // Fast path: nothing is batched at this level; call the in-place op directly.
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::bitwise_left_shift__Tensor_Scalar::call(self, other);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  batch_rule(self_value, self_bdim, other);
  return self;
}
template <typename batch_rule_t, batch_rule_t batch_rule>
// vmap plumbing for at::_ops::bitwise_left_shift_Scalar_Tensor: only `other`
// is a tensor, so only it is unwrapped/checked at the current vmap level.
at::Tensor bitwise_left_shift_Scalar_Tensor_generated_plumbing(const at::Scalar & self, const at::Tensor & other) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  // Fast path: nothing is batched at this level; call the op directly.
  if (!isBatchedAtLevel(other, cur_level)) {
    return at::_ops::bitwise_left_shift_Scalar_Tensor::call(self, other);
  }
  auto [other_value, other_bdim] = unwrapTensorAtLevel(other, cur_level);
  auto results = batch_rule(self, other_value, other_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
// vmap plumbing for at::_ops::__rshift___Scalar: unwrap `self` at the current
// dynamic layer, invoke the batch rule, and re-wrap the (value, bdim) result.
at::Tensor __rshift___Scalar_generated_plumbing(const at::Tensor & self, const at::Scalar & other) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  // Fast path: nothing is batched at this level; call the op directly.
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::__rshift___Scalar::call(self, other);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, other);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
// vmap plumbing for at::_ops::__rshift___Tensor: unwrap both tensor inputs,
// invoke the batch rule, and re-wrap the (value, bdim) result.
at::Tensor __rshift___Tensor_generated_plumbing(const at::Tensor & self, const at::Tensor & other) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  // Fast path: neither input is batched at this level; call the op directly.
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
    return at::_ops::__rshift___Tensor::call(self, other);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto [other_value, other_bdim] = unwrapTensorAtLevel(other, cur_level);
  auto results = batch_rule(self_value, self_bdim, other_value, other_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
// In-place vmap plumbing for at::_ops::__irshift___Scalar: the batch rule runs
// on the unwrapped value and the original `self` wrapper is returned as-is.
at::Tensor & __irshift___Scalar_generated_plumbing(at::Tensor & self, const at::Scalar & other) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  // Fast path: nothing is batched at this level; call the in-place op directly.
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::__irshift___Scalar::call(self, other);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  batch_rule(self_value, self_bdim, other);
  return self;
}
template <typename batch_rule_t, batch_rule_t batch_rule>
// In-place vmap plumbing for at::_ops::__irshift___Tensor: unwrap both inputs,
// run the batch rule on the unwrapped values, and return the original `self`.
at::Tensor & __irshift___Tensor_generated_plumbing(at::Tensor & self, const at::Tensor & other) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  // Fast path: neither input is batched at this level; call the op directly.
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
    return at::_ops::__irshift___Tensor::call(self, other);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto [other_value, other_bdim] = unwrapTensorAtLevel(other, cur_level);
  batch_rule(self_value, self_bdim, other_value, other_bdim);
  return self;
}
template <typename batch_rule_t, batch_rule_t batch_rule>
// vmap plumbing for at::_ops::bitwise_right_shift_Tensor: unwrap both tensor
// inputs, invoke the batch rule, and re-wrap the (value, bdim) result.
at::Tensor bitwise_right_shift_Tensor_generated_plumbing(const at::Tensor & self, const at::Tensor & other) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  // Fast path: neither input is batched at this level; call the op directly.
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
    return at::_ops::bitwise_right_shift_Tensor::call(self, other);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto [other_value, other_bdim] = unwrapTensorAtLevel(other, cur_level);
  auto results = batch_rule(self_value, self_bdim, other_value, other_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
// In-place vmap plumbing for at::_ops::bitwise_right_shift__Tensor: unwrap both
// inputs, run the batch rule on the unwrapped values, and return `self`.
at::Tensor & bitwise_right_shift__Tensor_generated_plumbing(at::Tensor & self, const at::Tensor & other) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  // Fast path: neither input is batched at this level; call the op directly.
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
    return at::_ops::bitwise_right_shift__Tensor::call(self, other);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto [other_value, other_bdim] = unwrapTensorAtLevel(other, cur_level);
  batch_rule(self_value, self_bdim, other_value, other_bdim);
  return self;
}
template <typename batch_rule_t, batch_rule_t batch_rule>
// vmap plumbing for at::_ops::bitwise_right_shift_Tensor_Scalar: unwrap `self`,
// invoke the batch rule, and re-wrap the (value, bdim) result.
at::Tensor bitwise_right_shift_Tensor_Scalar_generated_plumbing(const at::Tensor & self, const at::Scalar & other) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  // Fast path: nothing is batched at this level; call the op directly.
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::bitwise_right_shift_Tensor_Scalar::call(self, other);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, other);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
// In-place vmap plumbing for at::_ops::bitwise_right_shift__Tensor_Scalar: the
// batch rule runs on the unwrapped value; the original `self` is returned.
at::Tensor & bitwise_right_shift__Tensor_Scalar_generated_plumbing(at::Tensor & self, const at::Scalar & other) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  // Fast path: nothing is batched at this level; call the in-place op directly.
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::bitwise_right_shift__Tensor_Scalar::call(self, other);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  batch_rule(self_value, self_bdim, other);
  return self;
}
template <typename batch_rule_t, batch_rule_t batch_rule>
// vmap plumbing for at::_ops::bitwise_right_shift_Scalar_Tensor: only `other`
// is a tensor, so only it is unwrapped/checked at the current vmap level.
at::Tensor bitwise_right_shift_Scalar_Tensor_generated_plumbing(const at::Scalar & self, const at::Tensor & other) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  // Fast path: nothing is batched at this level; call the op directly.
  if (!isBatchedAtLevel(other, cur_level)) {
    return at::_ops::bitwise_right_shift_Scalar_Tensor::call(self, other);
  }
  auto [other_value, other_bdim] = unwrapTensorAtLevel(other, cur_level);
  auto results = batch_rule(self, other_value, other_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
// In-place vmap plumbing for at::_ops::tril_: the batch rule runs on the
// unwrapped value (with `diagonal` passed through) and `self` is returned.
at::Tensor & tril__generated_plumbing(at::Tensor & self, int64_t diagonal) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  // Fast path: nothing is batched at this level; call the in-place op directly.
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::tril_::call(self, diagonal);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  batch_rule(self_value, self_bdim, diagonal);
  return self;
}
template <typename batch_rule_t, batch_rule_t batch_rule>
// In-place vmap plumbing for at::_ops::triu_: the batch rule runs on the
// unwrapped value (with `diagonal` passed through) and `self` is returned.
at::Tensor & triu__generated_plumbing(at::Tensor & self, int64_t diagonal) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  // Fast path: nothing is batched at this level; call the in-place op directly.
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::triu_::call(self, diagonal);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  batch_rule(self_value, self_bdim, diagonal);
  return self;
}
template <typename batch_rule_t, batch_rule_t batch_rule>
// In-place vmap plumbing for at::_ops::digamma_: unary case — the batch rule
// runs on the unwrapped value and the original `self` wrapper is returned.
at::Tensor & digamma__generated_plumbing(at::Tensor & self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  // Fast path: nothing is batched at this level; call the in-place op directly.
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::digamma_::call(self);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  batch_rule(self_value, self_bdim);
  return self;
}
template <typename batch_rule_t, batch_rule_t batch_rule>
// In-place vmap plumbing for at::_ops::lerp__Scalar: unwrap the `self` and
// `end` tensors, run the batch rule (scalar `weight` passed through), return `self`.
at::Tensor & lerp__Scalar_generated_plumbing(at::Tensor & self, const at::Tensor & end, const at::Scalar & weight) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  // Fast path: no tensor input is batched at this level; call the op directly.
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(end, cur_level)) {
    return at::_ops::lerp__Scalar::call(self, end, weight);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto [end_value, end_bdim] = unwrapTensorAtLevel(end, cur_level);
  batch_rule(self_value, self_bdim, end_value, end_bdim, weight);
  return self;
}
template <typename batch_rule_t, batch_rule_t batch_rule>
// In-place vmap plumbing for at::_ops::lerp__Tensor: unwrap all three tensor
// inputs, run the batch rule on the unwrapped values, and return `self`.
at::Tensor & lerp__Tensor_generated_plumbing(at::Tensor & self, const at::Tensor & end, const at::Tensor & weight) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  // Fast path: no tensor input is batched at this level; call the op directly.
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(end, cur_level) && !isBatchedAtLevel(weight, cur_level)) {
    return at::_ops::lerp__Tensor::call(self, end, weight);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto [end_value, end_bdim] = unwrapTensorAtLevel(end, cur_level);
  auto [weight_value, weight_bdim] = unwrapTensorAtLevel(weight, cur_level);
  batch_rule(self_value, self_bdim, end_value, end_bdim, weight_value, weight_bdim);
  return self;
}
template <typename batch_rule_t, batch_rule_t batch_rule>
// In-place vmap plumbing for at::_ops::addbmm_: unwrap `self`, `batch1`, and
// `batch2`; scalars `beta`/`alpha` pass through; the original `self` is returned.
at::Tensor & addbmm__generated_plumbing(at::Tensor & self, const at::Tensor & batch1, const at::Tensor & batch2, const at::Scalar & beta, const at::Scalar & alpha) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  // Fast path: no tensor input is batched at this level; call the op directly.
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(batch1, cur_level) && !isBatchedAtLevel(batch2, cur_level)) {
    return at::_ops::addbmm_::call(self, batch1, batch2, beta, alpha);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto [batch1_value, batch1_bdim] = unwrapTensorAtLevel(batch1, cur_level);
  auto [batch2_value, batch2_bdim] = unwrapTensorAtLevel(batch2, cur_level);
  batch_rule(self_value, self_bdim, batch1_value, batch1_bdim, batch2_value, batch2_bdim, beta, alpha);
  return self;
}
// Generated vmap plumbing for aten::addbmm: falls through to the plain op when
// no input is batched at the current level; otherwise unwraps the inputs, runs
// the batch rule, and re-wraps the result as a batched tensor.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor addbmm_generated_plumbing(const at::Tensor & self, const at::Tensor & batch1, const at::Tensor & batch2, const at::Scalar & beta, const at::Scalar & alpha) {
  // Exclude the batched key so nested dispatch does not re-enter this kernel.
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(batch1, cur_level) && !isBatchedAtLevel(batch2, cur_level)) {
    return at::_ops::addbmm::call(self, batch1, batch2, beta, alpha);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto [batch1_value, batch1_bdim] = unwrapTensorAtLevel(batch1, cur_level);
  auto [batch2_value, batch2_bdim] = unwrapTensorAtLevel(batch2, cur_level);
  auto results = batch_rule(self_value, self_bdim, batch1_value, batch1_bdim, batch2_value, batch2_bdim, beta, alpha);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// Generated in-place vmap plumbing for aten::random_.from: falls through to the
// plain op when self is not batched at the current level; otherwise applies the
// batch rule to the unwrapped value and returns self.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor & random__from_generated_plumbing(at::Tensor & self, int64_t from, ::std::optional<int64_t> to, ::std::optional<at::Generator> generator) {
  // Exclude the batched key so nested dispatch does not re-enter this kernel.
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::random__from::call(self, from, to, generator);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  batch_rule(self_value, self_bdim, from, to, generator);
  return self;
}
// Generated in-place vmap plumbing for aten::random_.to: falls through to the
// plain op when self is not batched at the current level; otherwise applies the
// batch rule to the unwrapped value and returns self.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor & random__to_generated_plumbing(at::Tensor & self, int64_t to, ::std::optional<at::Generator> generator) {
  // Exclude the batched key so nested dispatch does not re-enter this kernel.
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::random__to::call(self, to, generator);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  batch_rule(self_value, self_bdim, to, generator);
  return self;
}
// Generated in-place vmap plumbing for aten::random_: falls through to the
// plain op when self is not batched at the current level; otherwise applies the
// batch rule to the unwrapped value and returns self.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor & random__generated_plumbing(at::Tensor & self, ::std::optional<at::Generator> generator) {
  // Exclude the batched key so nested dispatch does not re-enter this kernel.
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::random_::call(self, generator);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  batch_rule(self_value, self_bdim, generator);
  return self;
}
// Generated in-place vmap plumbing for aten::uniform_: falls through to the
// plain op when self is not batched at the current level; otherwise applies the
// batch rule to the unwrapped value and returns self.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor & uniform__generated_plumbing(at::Tensor & self, double from, double to, ::std::optional<at::Generator> generator) {
  // Exclude the batched key so nested dispatch does not re-enter this kernel.
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::uniform_::call(self, from, to, generator);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  batch_rule(self_value, self_bdim, from, to, generator);
  return self;
}
// Generated in-place vmap plumbing for aten::cauchy_: falls through to the
// plain op when self is not batched at the current level; otherwise applies the
// batch rule to the unwrapped value and returns self.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor & cauchy__generated_plumbing(at::Tensor & self, double median, double sigma, ::std::optional<at::Generator> generator) {
  // Exclude the batched key so nested dispatch does not re-enter this kernel.
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::cauchy_::call(self, median, sigma, generator);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  batch_rule(self_value, self_bdim, median, sigma, generator);
  return self;
}
// Generated in-place vmap plumbing for aten::log_normal_: falls through to the
// plain op when self is not batched at the current level; otherwise applies the
// batch rule to the unwrapped value and returns self.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor & log_normal__generated_plumbing(at::Tensor & self, double mean, double std, ::std::optional<at::Generator> generator) {
  // Exclude the batched key so nested dispatch does not re-enter this kernel.
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::log_normal_::call(self, mean, std, generator);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  batch_rule(self_value, self_bdim, mean, std, generator);
  return self;
}
// Generated in-place vmap plumbing for aten::exponential_: falls through to the
// plain op when self is not batched at the current level; otherwise applies the
// batch rule to the unwrapped value and returns self.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor & exponential__generated_plumbing(at::Tensor & self, double lambd, ::std::optional<at::Generator> generator) {
  // Exclude the batched key so nested dispatch does not re-enter this kernel.
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::exponential_::call(self, lambd, generator);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  batch_rule(self_value, self_bdim, lambd, generator);
  return self;
}
// Generated in-place vmap plumbing for aten::geometric_: falls through to the
// plain op when self is not batched at the current level; otherwise applies the
// batch rule to the unwrapped value and returns self.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor & geometric__generated_plumbing(at::Tensor & self, double p, ::std::optional<at::Generator> generator) {
  // Exclude the batched key so nested dispatch does not re-enter this kernel.
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::geometric_::call(self, p, generator);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  batch_rule(self_value, self_bdim, p, generator);
  return self;
}
// Generated vmap plumbing for aten::diag: falls through to the plain op when
// self is not batched at the current level; otherwise unwraps, runs the batch
// rule, and re-wraps the result as a batched tensor.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor diag_generated_plumbing(const at::Tensor & self, int64_t diagonal) {
  // Exclude the batched key so nested dispatch does not re-enter this kernel.
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::diag::call(self, diagonal);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, diagonal);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// Generated vmap plumbing for aten::cross: falls through to the plain op when
// no input is batched at the current level; otherwise unwraps both tensors,
// runs the batch rule, and re-wraps the result as a batched tensor.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor cross_generated_plumbing(const at::Tensor & self, const at::Tensor & other, ::std::optional<int64_t> dim) {
  // Exclude the batched key so nested dispatch does not re-enter this kernel.
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
    return at::_ops::cross::call(self, other, dim);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto [other_value, other_bdim] = unwrapTensorAtLevel(other, cur_level);
  auto results = batch_rule(self_value, self_bdim, other_value, other_bdim, dim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// Generated vmap plumbing for aten::triu: falls through to the plain op when
// self is not batched at the current level; otherwise unwraps, runs the batch
// rule, and re-wraps the result as a batched tensor.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor triu_generated_plumbing(const at::Tensor & self, int64_t diagonal) {
  // Exclude the batched key so nested dispatch does not re-enter this kernel.
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::triu::call(self, diagonal);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, diagonal);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// Generated vmap plumbing for aten::tril: falls through to the plain op when
// self is not batched at the current level; otherwise unwraps, runs the batch
// rule, and re-wraps the result as a batched tensor.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor tril_generated_plumbing(const at::Tensor & self, int64_t diagonal) {
  // Exclude the batched key so nested dispatch does not re-enter this kernel.
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::tril::call(self, diagonal);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, diagonal);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// Generated vmap plumbing for aten::trace: falls through to the plain op when
// self is not batched at the current level; otherwise unwraps, runs the batch
// rule, and re-wraps the result as a batched tensor.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor trace_generated_plumbing(const at::Tensor & self) {
  // Exclude the batched key so nested dispatch does not re-enter this kernel.
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::trace::call(self);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// Generated vmap plumbing for aten::trace_backward: falls through to the plain
// op when grad is not batched at the current level; otherwise unwraps, runs the
// batch rule, and re-wraps the result as a batched tensor.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor trace_backward_generated_plumbing(const at::Tensor & grad, c10::SymIntArrayRef sizes) {
  // Exclude the batched key so nested dispatch does not re-enter this kernel.
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(grad, cur_level)) {
    return at::_ops::trace_backward::call(grad, sizes);
  }
  auto [grad_value, grad_bdim] = unwrapTensorAtLevel(grad, cur_level);
  auto results = batch_rule(grad_value, grad_bdim, sizes);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// Generated vmap plumbing for aten::ne.Scalar: falls through to the plain op
// when self is not batched at the current level; otherwise unwraps, runs the
// batch rule, and re-wraps the result as a batched tensor.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor ne_Scalar_generated_plumbing(const at::Tensor & self, const at::Scalar & other) {
  // Exclude the batched key so nested dispatch does not re-enter this kernel.
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::ne_Scalar::call(self, other);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, other);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// Generated vmap plumbing for aten::ne.Tensor: falls through to the plain op
// when no input is batched at the current level; otherwise unwraps both
// tensors, runs the batch rule, and re-wraps the result as a batched tensor.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor ne_Tensor_generated_plumbing(const at::Tensor & self, const at::Tensor & other) {
  // Exclude the batched key so nested dispatch does not re-enter this kernel.
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
    return at::_ops::ne_Tensor::call(self, other);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto [other_value, other_bdim] = unwrapTensorAtLevel(other, cur_level);
  auto results = batch_rule(self_value, self_bdim, other_value, other_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// Generated in-place vmap plumbing for aten::ne_.Scalar: falls through to the
// plain op when self is not batched at the current level; otherwise applies the
// batch rule to the unwrapped value and returns self.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor & ne__Scalar_generated_plumbing(at::Tensor & self, const at::Scalar & other) {
  // Exclude the batched key so nested dispatch does not re-enter this kernel.
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::ne__Scalar::call(self, other);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  batch_rule(self_value, self_bdim, other);
  return self;
}
// Generated in-place vmap plumbing for aten::ne_.Tensor: falls through to the
// plain op when no input is batched at the current level; otherwise applies the
// batch rule to the unwrapped values and returns self.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor & ne__Tensor_generated_plumbing(at::Tensor & self, const at::Tensor & other) {
  // Exclude the batched key so nested dispatch does not re-enter this kernel.
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
    return at::_ops::ne__Tensor::call(self, other);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto [other_value, other_bdim] = unwrapTensorAtLevel(other, cur_level);
  batch_rule(self_value, self_bdim, other_value, other_bdim);
  return self;
}
// Generated vmap plumbing for aten::not_equal.Scalar: falls through to the
// plain op when self is not batched at the current level; otherwise unwraps,
// runs the batch rule, and re-wraps the result as a batched tensor.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor not_equal_Scalar_generated_plumbing(const at::Tensor & self, const at::Scalar & other) {
  // Exclude the batched key so nested dispatch does not re-enter this kernel.
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::not_equal_Scalar::call(self, other);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, other);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// Generated vmap plumbing for aten::not_equal.Tensor: falls through to the
// plain op when no input is batched at the current level; otherwise unwraps
// both tensors, runs the batch rule, and re-wraps the result.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor not_equal_Tensor_generated_plumbing(const at::Tensor & self, const at::Tensor & other) {
  // Exclude the batched key so nested dispatch does not re-enter this kernel.
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
    return at::_ops::not_equal_Tensor::call(self, other);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto [other_value, other_bdim] = unwrapTensorAtLevel(other, cur_level);
  auto results = batch_rule(self_value, self_bdim, other_value, other_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// Generated in-place vmap plumbing for aten::not_equal_.Scalar: falls through
// to the plain op when self is not batched at the current level; otherwise
// applies the batch rule to the unwrapped value and returns self.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor & not_equal__Scalar_generated_plumbing(at::Tensor & self, const at::Scalar & other) {
  // Exclude the batched key so nested dispatch does not re-enter this kernel.
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::not_equal__Scalar::call(self, other);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  batch_rule(self_value, self_bdim, other);
  return self;
}
// Generated in-place vmap plumbing for aten::not_equal_.Tensor: falls through
// to the plain op when no input is batched at the current level; otherwise
// applies the batch rule to the unwrapped values and returns self.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor & not_equal__Tensor_generated_plumbing(at::Tensor & self, const at::Tensor & other) {
  // Exclude the batched key so nested dispatch does not re-enter this kernel.
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
    return at::_ops::not_equal__Tensor::call(self, other);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto [other_value, other_bdim] = unwrapTensorAtLevel(other, cur_level);
  batch_rule(self_value, self_bdim, other_value, other_bdim);
  return self;
}
// Generated vmap plumbing for aten::eq.Scalar: falls through to the plain op
// when self is not batched at the current level; otherwise unwraps, runs the
// batch rule, and re-wraps the result as a batched tensor.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor eq_Scalar_generated_plumbing(const at::Tensor & self, const at::Scalar & other) {
  // Exclude the batched key so nested dispatch does not re-enter this kernel.
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::eq_Scalar::call(self, other);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, other);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// Generated vmap plumbing for aten::eq.Tensor: falls through to the plain op
// when no input is batched at the current level; otherwise unwraps both
// tensors, runs the batch rule, and re-wraps the result as a batched tensor.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor eq_Tensor_generated_plumbing(const at::Tensor & self, const at::Tensor & other) {
  // Exclude the batched key so nested dispatch does not re-enter this kernel.
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
    return at::_ops::eq_Tensor::call(self, other);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto [other_value, other_bdim] = unwrapTensorAtLevel(other, cur_level);
  auto results = batch_rule(self_value, self_bdim, other_value, other_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// Generated vmap plumbing for aten::ge.Scalar: falls through to the plain op
// when self is not batched at the current level; otherwise unwraps, runs the
// batch rule, and re-wraps the result as a batched tensor.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor ge_Scalar_generated_plumbing(const at::Tensor & self, const at::Scalar & other) {
  // Exclude the batched key so nested dispatch does not re-enter this kernel.
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::ge_Scalar::call(self, other);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, other);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// Generated vmap plumbing for aten::ge.Tensor: falls through to the plain op
// when no input is batched at the current level; otherwise unwraps both
// tensors, runs the batch rule, and re-wraps the result as a batched tensor.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor ge_Tensor_generated_plumbing(const at::Tensor & self, const at::Tensor & other) {
  // Exclude the batched key so nested dispatch does not re-enter this kernel.
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
    return at::_ops::ge_Tensor::call(self, other);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto [other_value, other_bdim] = unwrapTensorAtLevel(other, cur_level);
  auto results = batch_rule(self_value, self_bdim, other_value, other_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// Generated in-place vmap plumbing for aten::ge_.Scalar: falls through to the
// plain op when self is not batched at the current level; otherwise applies the
// batch rule to the unwrapped value and returns self.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor & ge__Scalar_generated_plumbing(at::Tensor & self, const at::Scalar & other) {
  // Exclude the batched key so nested dispatch does not re-enter this kernel.
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::ge__Scalar::call(self, other);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  batch_rule(self_value, self_bdim, other);
  return self;
}
// Generated in-place vmap plumbing for aten::ge_.Tensor: falls through to the
// plain op when no input is batched at the current level; otherwise applies the
// batch rule to the unwrapped values and returns self.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor & ge__Tensor_generated_plumbing(at::Tensor & self, const at::Tensor & other) {
  // Exclude the batched key so nested dispatch does not re-enter this kernel.
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
    return at::_ops::ge__Tensor::call(self, other);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto [other_value, other_bdim] = unwrapTensorAtLevel(other, cur_level);
  batch_rule(self_value, self_bdim, other_value, other_bdim);
  return self;
}
// Generated vmap plumbing for aten::greater_equal.Scalar: falls through to the
// plain op when self is not batched at the current level; otherwise unwraps,
// runs the batch rule, and re-wraps the result as a batched tensor.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor greater_equal_Scalar_generated_plumbing(const at::Tensor & self, const at::Scalar & other) {
  // Exclude the batched key so nested dispatch does not re-enter this kernel.
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::greater_equal_Scalar::call(self, other);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, other);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// Generated vmap plumbing for aten::greater_equal.Tensor: falls through to the
// plain op when no input is batched at the current level; otherwise unwraps
// both tensors, runs the batch rule, and re-wraps the result.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor greater_equal_Tensor_generated_plumbing(const at::Tensor & self, const at::Tensor & other) {
  // Exclude the batched key so nested dispatch does not re-enter this kernel.
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
    return at::_ops::greater_equal_Tensor::call(self, other);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto [other_value, other_bdim] = unwrapTensorAtLevel(other, cur_level);
  auto results = batch_rule(self_value, self_bdim, other_value, other_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// Generated in-place vmap plumbing for aten::greater_equal_.Scalar: falls
// through to the plain op when self is not batched at the current level;
// otherwise applies the batch rule to the unwrapped value and returns self.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor & greater_equal__Scalar_generated_plumbing(at::Tensor & self, const at::Scalar & other) {
  // Exclude the batched key so nested dispatch does not re-enter this kernel.
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::greater_equal__Scalar::call(self, other);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  batch_rule(self_value, self_bdim, other);
  return self;
}
// Generated in-place vmap plumbing for aten::greater_equal_.Tensor: falls
// through to the plain op when no input is batched at the current level;
// otherwise applies the batch rule to the unwrapped values and returns self.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor & greater_equal__Tensor_generated_plumbing(at::Tensor & self, const at::Tensor & other) {
  // Exclude the batched key so nested dispatch does not re-enter this kernel.
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
    return at::_ops::greater_equal__Tensor::call(self, other);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto [other_value, other_bdim] = unwrapTensorAtLevel(other, cur_level);
  batch_rule(self_value, self_bdim, other_value, other_bdim);
  return self;
}
// Generated vmap plumbing for aten::le.Scalar: falls through to the plain op
// when self is not batched at the current level; otherwise unwraps, runs the
// batch rule, and re-wraps the result as a batched tensor.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor le_Scalar_generated_plumbing(const at::Tensor & self, const at::Scalar & other) {
  // Exclude the batched key so nested dispatch does not re-enter this kernel.
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::le_Scalar::call(self, other);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, other);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// Generated vmap plumbing for aten::le.Tensor: falls through to the plain op
// when no input is batched at the current level; otherwise unwraps both
// tensors, runs the batch rule, and re-wraps the result as a batched tensor.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor le_Tensor_generated_plumbing(const at::Tensor & self, const at::Tensor & other) {
  // Exclude the batched key so nested dispatch does not re-enter this kernel.
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
    return at::_ops::le_Tensor::call(self, other);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto [other_value, other_bdim] = unwrapTensorAtLevel(other, cur_level);
  auto results = batch_rule(self_value, self_bdim, other_value, other_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// Generated in-place vmap plumbing for aten::le_.Scalar: falls through to the
// plain op when self is not batched at the current level; otherwise applies the
// batch rule to the unwrapped value and returns self.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor & le__Scalar_generated_plumbing(at::Tensor & self, const at::Scalar & other) {
  // Exclude the batched key so nested dispatch does not re-enter this kernel.
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::le__Scalar::call(self, other);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  batch_rule(self_value, self_bdim, other);
  return self;
}
// Generated in-place vmap plumbing for aten::le_.Tensor: falls through to the
// plain op when no input is batched at the current level; otherwise applies the
// batch rule to the unwrapped values and returns self.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor & le__Tensor_generated_plumbing(at::Tensor & self, const at::Tensor & other) {
  // Exclude the batched key so nested dispatch does not re-enter this kernel.
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
    return at::_ops::le__Tensor::call(self, other);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto [other_value, other_bdim] = unwrapTensorAtLevel(other, cur_level);
  batch_rule(self_value, self_bdim, other_value, other_bdim);
  return self;
}
// Generated vmap plumbing for aten::less_equal.Scalar: falls through to the
// plain op when self is not batched at the current level; otherwise unwraps,
// runs the batch rule, and re-wraps the result as a batched tensor.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor less_equal_Scalar_generated_plumbing(const at::Tensor & self, const at::Scalar & other) {
  // Exclude the batched key so nested dispatch does not re-enter this kernel.
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::less_equal_Scalar::call(self, other);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, other);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// Generated vmap plumbing for aten::less_equal.Tensor: falls through to the
// plain op when no input is batched at the current level; otherwise unwraps
// both tensors, runs the batch rule, and re-wraps the result.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor less_equal_Tensor_generated_plumbing(const at::Tensor & self, const at::Tensor & other) {
  // Exclude the batched key so nested dispatch does not re-enter this kernel.
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
    return at::_ops::less_equal_Tensor::call(self, other);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto [other_value, other_bdim] = unwrapTensorAtLevel(other, cur_level);
  auto results = batch_rule(self_value, self_bdim, other_value, other_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
16425 template <typename batch_rule_t, batch_rule_t batch_rule>
16426 at::Tensor & less_equal__Scalar_generated_plumbing(at::Tensor & self, const at::Scalar & other) {
16427   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
16428   auto maybe_layer = maybeCurrentDynamicLayer();
16429   vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
16430   int64_t cur_level = maybe_layer->layerId();
16431   if (!isBatchedAtLevel(self, cur_level)) {
16432     return at::_ops::less_equal__Scalar::call(self, other);
16433   }
16434   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
16435   batch_rule(self_value, self_bdim, other);
16436   return self;
16437 }
16438 template <typename batch_rule_t, batch_rule_t batch_rule>
16439 at::Tensor & less_equal__Tensor_generated_plumbing(at::Tensor & self, const at::Tensor & other) {
16440   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
16441   auto maybe_layer = maybeCurrentDynamicLayer();
16442   vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
16443   int64_t cur_level = maybe_layer->layerId();
16444   if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
16445     return at::_ops::less_equal__Tensor::call(self, other);
16446   }
16447   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
16448   auto [other_value, other_bdim] = unwrapTensorAtLevel(other, cur_level);
16449   batch_rule(self_value, self_bdim, other_value, other_bdim);
16450   return self;
16451 }
16452 template <typename batch_rule_t, batch_rule_t batch_rule>
16453 at::Tensor gt_Scalar_generated_plumbing(const at::Tensor & self, const at::Scalar & other) {
16454   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
16455   auto maybe_layer = maybeCurrentDynamicLayer();
16456   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
16457   int64_t cur_level = maybe_layer->layerId();
16458   if (!isBatchedAtLevel(self, cur_level)) {
16459     return at::_ops::gt_Scalar::call(self, other);
16460   }
16461   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
16462   auto results = batch_rule(self_value, self_bdim, other);
16463   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
16464 }
16465 template <typename batch_rule_t, batch_rule_t batch_rule>
16466 at::Tensor gt_Tensor_generated_plumbing(const at::Tensor & self, const at::Tensor & other) {
16467   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
16468   auto maybe_layer = maybeCurrentDynamicLayer();
16469   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
16470   int64_t cur_level = maybe_layer->layerId();
16471   if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
16472     return at::_ops::gt_Tensor::call(self, other);
16473   }
16474   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
16475   auto [other_value, other_bdim] = unwrapTensorAtLevel(other, cur_level);
16476   auto results = batch_rule(self_value, self_bdim, other_value, other_bdim);
16477   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
16478 }
16479 template <typename batch_rule_t, batch_rule_t batch_rule>
16480 at::Tensor & gt__Scalar_generated_plumbing(at::Tensor & self, const at::Scalar & other) {
16481   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
16482   auto maybe_layer = maybeCurrentDynamicLayer();
16483   vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
16484   int64_t cur_level = maybe_layer->layerId();
16485   if (!isBatchedAtLevel(self, cur_level)) {
16486     return at::_ops::gt__Scalar::call(self, other);
16487   }
16488   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
16489   batch_rule(self_value, self_bdim, other);
16490   return self;
16491 }
16492 template <typename batch_rule_t, batch_rule_t batch_rule>
16493 at::Tensor & gt__Tensor_generated_plumbing(at::Tensor & self, const at::Tensor & other) {
16494   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
16495   auto maybe_layer = maybeCurrentDynamicLayer();
16496   vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
16497   int64_t cur_level = maybe_layer->layerId();
16498   if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
16499     return at::_ops::gt__Tensor::call(self, other);
16500   }
16501   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
16502   auto [other_value, other_bdim] = unwrapTensorAtLevel(other, cur_level);
16503   batch_rule(self_value, self_bdim, other_value, other_bdim);
16504   return self;
16505 }
16506 template <typename batch_rule_t, batch_rule_t batch_rule>
16507 at::Tensor greater_Scalar_generated_plumbing(const at::Tensor & self, const at::Scalar & other) {
16508   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
16509   auto maybe_layer = maybeCurrentDynamicLayer();
16510   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
16511   int64_t cur_level = maybe_layer->layerId();
16512   if (!isBatchedAtLevel(self, cur_level)) {
16513     return at::_ops::greater_Scalar::call(self, other);
16514   }
16515   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
16516   auto results = batch_rule(self_value, self_bdim, other);
16517   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
16518 }
16519 template <typename batch_rule_t, batch_rule_t batch_rule>
16520 at::Tensor greater_Tensor_generated_plumbing(const at::Tensor & self, const at::Tensor & other) {
16521   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
16522   auto maybe_layer = maybeCurrentDynamicLayer();
16523   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
16524   int64_t cur_level = maybe_layer->layerId();
16525   if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
16526     return at::_ops::greater_Tensor::call(self, other);
16527   }
16528   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
16529   auto [other_value, other_bdim] = unwrapTensorAtLevel(other, cur_level);
16530   auto results = batch_rule(self_value, self_bdim, other_value, other_bdim);
16531   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
16532 }
16533 template <typename batch_rule_t, batch_rule_t batch_rule>
16534 at::Tensor & greater__Scalar_generated_plumbing(at::Tensor & self, const at::Scalar & other) {
16535   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
16536   auto maybe_layer = maybeCurrentDynamicLayer();
16537   vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
16538   int64_t cur_level = maybe_layer->layerId();
16539   if (!isBatchedAtLevel(self, cur_level)) {
16540     return at::_ops::greater__Scalar::call(self, other);
16541   }
16542   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
16543   batch_rule(self_value, self_bdim, other);
16544   return self;
16545 }
16546 template <typename batch_rule_t, batch_rule_t batch_rule>
16547 at::Tensor & greater__Tensor_generated_plumbing(at::Tensor & self, const at::Tensor & other) {
16548   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
16549   auto maybe_layer = maybeCurrentDynamicLayer();
16550   vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
16551   int64_t cur_level = maybe_layer->layerId();
16552   if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
16553     return at::_ops::greater__Tensor::call(self, other);
16554   }
16555   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
16556   auto [other_value, other_bdim] = unwrapTensorAtLevel(other, cur_level);
16557   batch_rule(self_value, self_bdim, other_value, other_bdim);
16558   return self;
16559 }
16560 template <typename batch_rule_t, batch_rule_t batch_rule>
16561 at::Tensor lt_Scalar_generated_plumbing(const at::Tensor & self, const at::Scalar & other) {
16562   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
16563   auto maybe_layer = maybeCurrentDynamicLayer();
16564   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
16565   int64_t cur_level = maybe_layer->layerId();
16566   if (!isBatchedAtLevel(self, cur_level)) {
16567     return at::_ops::lt_Scalar::call(self, other);
16568   }
16569   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
16570   auto results = batch_rule(self_value, self_bdim, other);
16571   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
16572 }
16573 template <typename batch_rule_t, batch_rule_t batch_rule>
16574 at::Tensor lt_Tensor_generated_plumbing(const at::Tensor & self, const at::Tensor & other) {
16575   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
16576   auto maybe_layer = maybeCurrentDynamicLayer();
16577   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
16578   int64_t cur_level = maybe_layer->layerId();
16579   if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
16580     return at::_ops::lt_Tensor::call(self, other);
16581   }
16582   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
16583   auto [other_value, other_bdim] = unwrapTensorAtLevel(other, cur_level);
16584   auto results = batch_rule(self_value, self_bdim, other_value, other_bdim);
16585   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
16586 }
16587 template <typename batch_rule_t, batch_rule_t batch_rule>
16588 at::Tensor & lt__Scalar_generated_plumbing(at::Tensor & self, const at::Scalar & other) {
16589   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
16590   auto maybe_layer = maybeCurrentDynamicLayer();
16591   vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
16592   int64_t cur_level = maybe_layer->layerId();
16593   if (!isBatchedAtLevel(self, cur_level)) {
16594     return at::_ops::lt__Scalar::call(self, other);
16595   }
16596   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
16597   batch_rule(self_value, self_bdim, other);
16598   return self;
16599 }
16600 template <typename batch_rule_t, batch_rule_t batch_rule>
16601 at::Tensor & lt__Tensor_generated_plumbing(at::Tensor & self, const at::Tensor & other) {
16602   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
16603   auto maybe_layer = maybeCurrentDynamicLayer();
16604   vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
16605   int64_t cur_level = maybe_layer->layerId();
16606   if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
16607     return at::_ops::lt__Tensor::call(self, other);
16608   }
16609   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
16610   auto [other_value, other_bdim] = unwrapTensorAtLevel(other, cur_level);
16611   batch_rule(self_value, self_bdim, other_value, other_bdim);
16612   return self;
16613 }
16614 template <typename batch_rule_t, batch_rule_t batch_rule>
16615 at::Tensor less_Scalar_generated_plumbing(const at::Tensor & self, const at::Scalar & other) {
16616   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
16617   auto maybe_layer = maybeCurrentDynamicLayer();
16618   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
16619   int64_t cur_level = maybe_layer->layerId();
16620   if (!isBatchedAtLevel(self, cur_level)) {
16621     return at::_ops::less_Scalar::call(self, other);
16622   }
16623   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
16624   auto results = batch_rule(self_value, self_bdim, other);
16625   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
16626 }
16627 template <typename batch_rule_t, batch_rule_t batch_rule>
16628 at::Tensor less_Tensor_generated_plumbing(const at::Tensor & self, const at::Tensor & other) {
16629   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
16630   auto maybe_layer = maybeCurrentDynamicLayer();
16631   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
16632   int64_t cur_level = maybe_layer->layerId();
16633   if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
16634     return at::_ops::less_Tensor::call(self, other);
16635   }
16636   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
16637   auto [other_value, other_bdim] = unwrapTensorAtLevel(other, cur_level);
16638   auto results = batch_rule(self_value, self_bdim, other_value, other_bdim);
16639   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
16640 }
16641 template <typename batch_rule_t, batch_rule_t batch_rule>
16642 at::Tensor & less__Scalar_generated_plumbing(at::Tensor & self, const at::Scalar & other) {
16643   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
16644   auto maybe_layer = maybeCurrentDynamicLayer();
16645   vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
16646   int64_t cur_level = maybe_layer->layerId();
16647   if (!isBatchedAtLevel(self, cur_level)) {
16648     return at::_ops::less__Scalar::call(self, other);
16649   }
16650   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
16651   batch_rule(self_value, self_bdim, other);
16652   return self;
16653 }
16654 template <typename batch_rule_t, batch_rule_t batch_rule>
16655 at::Tensor & less__Tensor_generated_plumbing(at::Tensor & self, const at::Tensor & other) {
16656   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
16657   auto maybe_layer = maybeCurrentDynamicLayer();
16658   vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
16659   int64_t cur_level = maybe_layer->layerId();
16660   if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
16661     return at::_ops::less__Tensor::call(self, other);
16662   }
16663   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
16664   auto [other_value, other_bdim] = unwrapTensorAtLevel(other, cur_level);
16665   batch_rule(self_value, self_bdim, other_value, other_bdim);
16666   return self;
16667 }
16668 template <typename batch_rule_t, batch_rule_t batch_rule>
16669 at::Tensor take_generated_plumbing(const at::Tensor & self, const at::Tensor & index) {
16670   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
16671   auto maybe_layer = maybeCurrentDynamicLayer();
16672   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
16673   int64_t cur_level = maybe_layer->layerId();
16674   if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(index, cur_level)) {
16675     return at::_ops::take::call(self, index);
16676   }
16677   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
16678   auto [index_value, index_bdim] = unwrapTensorAtLevel(index, cur_level);
16679   auto results = batch_rule(self_value, self_bdim, index_value, index_bdim);
16680   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
16681 }
16682 template <typename batch_rule_t, batch_rule_t batch_rule>
16683 at::Tensor take_along_dim_generated_plumbing(const at::Tensor & self, const at::Tensor & indices, ::std::optional<int64_t> dim) {
16684   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
16685   auto maybe_layer = maybeCurrentDynamicLayer();
16686   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
16687   int64_t cur_level = maybe_layer->layerId();
16688   if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(indices, cur_level)) {
16689     return at::_ops::take_along_dim::call(self, indices, dim);
16690   }
16691   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
16692   auto [indices_value, indices_bdim] = unwrapTensorAtLevel(indices, cur_level);
16693   auto results = batch_rule(self_value, self_bdim, indices_value, indices_bdim, dim);
16694   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
16695 }
16696 template <typename batch_rule_t, batch_rule_t batch_rule>
16697 at::Tensor index_select_generated_plumbing(const at::Tensor & self, int64_t dim, const at::Tensor & index) {
16698   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
16699   auto maybe_layer = maybeCurrentDynamicLayer();
16700   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
16701   int64_t cur_level = maybe_layer->layerId();
16702   if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(index, cur_level)) {
16703     return at::_ops::index_select::call(self, dim, index);
16704   }
16705   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
16706   auto [index_value, index_bdim] = unwrapTensorAtLevel(index, cur_level);
16707   auto results = batch_rule(self_value, self_bdim, dim, index_value, index_bdim);
16708   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
16709 }
16710 template <typename batch_rule_t, batch_rule_t batch_rule>
16711 at::Tensor index_select_dimname_generated_plumbing(const at::Tensor & self, at::Dimname dim, const at::Tensor & index) {
16712   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
16713   auto maybe_layer = maybeCurrentDynamicLayer();
16714   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
16715   int64_t cur_level = maybe_layer->layerId();
16716   if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(index, cur_level)) {
16717     return at::_ops::index_select_dimname::call(self, dim, index);
16718   }
16719   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
16720   auto [index_value, index_bdim] = unwrapTensorAtLevel(index, cur_level);
16721   auto results = batch_rule(self_value, self_bdim, dim, index_value, index_bdim);
16722   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
16723 }
16724 template <typename batch_rule_t, batch_rule_t batch_rule>
16725 at::Tensor index_select_backward_generated_plumbing(const at::Tensor & grad, c10::SymIntArrayRef self_sizes, int64_t dim, const at::Tensor & index) {
16726   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
16727   auto maybe_layer = maybeCurrentDynamicLayer();
16728   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
16729   int64_t cur_level = maybe_layer->layerId();
16730   if (!isBatchedAtLevel(grad, cur_level) && !isBatchedAtLevel(index, cur_level)) {
16731     return at::_ops::index_select_backward::call(grad, self_sizes, dim, index);
16732   }
16733   auto [grad_value, grad_bdim] = unwrapTensorAtLevel(grad, cur_level);
16734   auto [index_value, index_bdim] = unwrapTensorAtLevel(index, cur_level);
16735   auto results = batch_rule(grad_value, grad_bdim, self_sizes, dim, index_value, index_bdim);
16736   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
16737 }
16738 template <typename batch_rule_t, batch_rule_t batch_rule>
16739 at::Tensor masked_select_generated_plumbing(const at::Tensor & self, const at::Tensor & mask) {
16740   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
16741   auto maybe_layer = maybeCurrentDynamicLayer();
16742   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
16743   int64_t cur_level = maybe_layer->layerId();
16744   if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(mask, cur_level)) {
16745     return at::_ops::masked_select::call(self, mask);
16746   }
16747   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
16748   auto [mask_value, mask_bdim] = unwrapTensorAtLevel(mask, cur_level);
16749   auto results = batch_rule(self_value, self_bdim, mask_value, mask_bdim);
16750   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
16751 }
16752 template <typename batch_rule_t, batch_rule_t batch_rule>
16753 at::Tensor masked_select_backward_generated_plumbing(const at::Tensor & grad, const at::Tensor & input, const at::Tensor & mask) {
16754   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
16755   auto maybe_layer = maybeCurrentDynamicLayer();
16756   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
16757   int64_t cur_level = maybe_layer->layerId();
16758   if (!isBatchedAtLevel(grad, cur_level) && !isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(mask, cur_level)) {
16759     return at::_ops::masked_select_backward::call(grad, input, mask);
16760   }
16761   auto [grad_value, grad_bdim] = unwrapTensorAtLevel(grad, cur_level);
16762   auto [input_value, input_bdim] = unwrapTensorAtLevel(input, cur_level);
16763   auto [mask_value, mask_bdim] = unwrapTensorAtLevel(mask, cur_level);
16764   auto results = batch_rule(grad_value, grad_bdim, input_value, input_bdim, mask_value, mask_bdim);
16765   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
16766 }
16767 template <typename batch_rule_t, batch_rule_t batch_rule>
16768 at::Tensor nonzero_generated_plumbing(const at::Tensor & self) {
16769   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
16770   auto maybe_layer = maybeCurrentDynamicLayer();
16771   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
16772   int64_t cur_level = maybe_layer->layerId();
16773   if (!isBatchedAtLevel(self, cur_level)) {
16774     return at::_ops::nonzero::call(self);
16775   }
16776   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
16777   auto results = batch_rule(self_value, self_bdim);
16778   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
16779 }
16780 template <typename batch_rule_t, batch_rule_t batch_rule>
16781 at::Tensor nonzero_static_generated_plumbing(const at::Tensor & self, c10::SymInt size, int64_t fill_value) {
16782   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
16783   auto maybe_layer = maybeCurrentDynamicLayer();
16784   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
16785   int64_t cur_level = maybe_layer->layerId();
16786   if (!isBatchedAtLevel(self, cur_level)) {
16787     return at::_ops::nonzero_static::call(self, size, fill_value);
16788   }
16789   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
16790   auto results = batch_rule(self_value, self_bdim, size, fill_value);
16791   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
16792 }
16793 template <typename batch_rule_t, batch_rule_t batch_rule>
16794 ::std::vector<at::Tensor> nonzero_numpy_generated_plumbing(const at::Tensor & self) {
16795   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
16796   auto maybe_layer = maybeCurrentDynamicLayer();
16797   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
16798   int64_t cur_level = maybe_layer->layerId();
16799   if (!isBatchedAtLevel(self, cur_level)) {
16800     return at::_ops::nonzero_numpy::call(self);
16801   }
16802   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
16803   auto results = batch_rule(self_value, self_bdim);
16804   return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
16805 }
16806 template <typename batch_rule_t, batch_rule_t batch_rule>
16807 at::Tensor argwhere_generated_plumbing(const at::Tensor & self) {
16808   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
16809   auto maybe_layer = maybeCurrentDynamicLayer();
16810   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
16811   int64_t cur_level = maybe_layer->layerId();
16812   if (!isBatchedAtLevel(self, cur_level)) {
16813     return at::_ops::argwhere::call(self);
16814   }
16815   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
16816   auto results = batch_rule(self_value, self_bdim);
16817   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
16818 }
16819 template <typename batch_rule_t, batch_rule_t batch_rule>
16820 at::Tensor gather_generated_plumbing(const at::Tensor & self, int64_t dim, const at::Tensor & index, bool sparse_grad) {
16821   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
16822   auto maybe_layer = maybeCurrentDynamicLayer();
16823   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
16824   int64_t cur_level = maybe_layer->layerId();
16825   if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(index, cur_level)) {
16826     return at::_ops::gather::call(self, dim, index, sparse_grad);
16827   }
16828   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
16829   auto [index_value, index_bdim] = unwrapTensorAtLevel(index, cur_level);
16830   auto results = batch_rule(self_value, self_bdim, dim, index_value, index_bdim, sparse_grad);
16831   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
16832 }
16833 template <typename batch_rule_t, batch_rule_t batch_rule>
16834 at::Tensor gather_backward_generated_plumbing(const at::Tensor & grad, const at::Tensor & self, int64_t dim, const at::Tensor & index, bool sparse_grad) {
16835   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
16836   auto maybe_layer = maybeCurrentDynamicLayer();
16837   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
16838   int64_t cur_level = maybe_layer->layerId();
16839   if (!isBatchedAtLevel(grad, cur_level) && !isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(index, cur_level)) {
16840     return at::_ops::gather_backward::call(grad, self, dim, index, sparse_grad);
16841   }
16842   auto [grad_value, grad_bdim] = unwrapTensorAtLevel(grad, cur_level);
16843   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
16844   auto [index_value, index_bdim] = unwrapTensorAtLevel(index, cur_level);
16845   auto results = batch_rule(grad_value, grad_bdim, self_value, self_bdim, dim, index_value, index_bdim, sparse_grad);
16846   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
16847 }
16848 template <typename batch_rule_t, batch_rule_t batch_rule>
16849 at::Tensor gather_dimname_generated_plumbing(const at::Tensor & self, at::Dimname dim, const at::Tensor & index, bool sparse_grad) {
16850   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
16851   auto maybe_layer = maybeCurrentDynamicLayer();
16852   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
16853   int64_t cur_level = maybe_layer->layerId();
16854   if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(index, cur_level)) {
16855     return at::_ops::gather_dimname::call(self, dim, index, sparse_grad);
16856   }
16857   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
16858   auto [index_value, index_bdim] = unwrapTensorAtLevel(index, cur_level);
16859   auto results = batch_rule(self_value, self_bdim, dim, index_value, index_bdim, sparse_grad);
16860   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
16861 }
16862 template <typename batch_rule_t, batch_rule_t batch_rule>
16863 at::Tensor _gather_sparse_backward_generated_plumbing(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & grad) {
16864   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
16865   auto maybe_layer = maybeCurrentDynamicLayer();
16866   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
16867   int64_t cur_level = maybe_layer->layerId();
16868   if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(index, cur_level) && !isBatchedAtLevel(grad, cur_level)) {
16869     return at::_ops::_gather_sparse_backward::call(self, dim, index, grad);
16870   }
16871   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
16872   auto [index_value, index_bdim] = unwrapTensorAtLevel(index, cur_level);
16873   auto [grad_value, grad_bdim] = unwrapTensorAtLevel(grad, cur_level);
16874   auto results = batch_rule(self_value, self_bdim, dim, index_value, index_bdim, grad_value, grad_bdim);
16875   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
16876 }
16877 template <typename batch_rule_t, batch_rule_t batch_rule>
16878 at::Tensor addcmul_generated_plumbing(const at::Tensor & self, const at::Tensor & tensor1, const at::Tensor & tensor2, const at::Scalar & value) {
16879   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
16880   auto maybe_layer = maybeCurrentDynamicLayer();
16881   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
16882   int64_t cur_level = maybe_layer->layerId();
16883   if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(tensor1, cur_level) && !isBatchedAtLevel(tensor2, cur_level)) {
16884     return at::_ops::addcmul::call(self, tensor1, tensor2, value);
16885   }
16886   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
16887   auto [tensor1_value, tensor1_bdim] = unwrapTensorAtLevel(tensor1, cur_level);
16888   auto [tensor2_value, tensor2_bdim] = unwrapTensorAtLevel(tensor2, cur_level);
16889   auto results = batch_rule(self_value, self_bdim, tensor1_value, tensor1_bdim, tensor2_value, tensor2_bdim, value);
16890   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
16891 }
16892 template <typename batch_rule_t, batch_rule_t batch_rule>
16893 at::Tensor & addcmul__generated_plumbing(at::Tensor & self, const at::Tensor & tensor1, const at::Tensor & tensor2, const at::Scalar & value) {
16894   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
16895   auto maybe_layer = maybeCurrentDynamicLayer();
16896   vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
16897   int64_t cur_level = maybe_layer->layerId();
16898   if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(tensor1, cur_level) && !isBatchedAtLevel(tensor2, cur_level)) {
16899     return at::_ops::addcmul_::call(self, tensor1, tensor2, value);
16900   }
16901   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
16902   auto [tensor1_value, tensor1_bdim] = unwrapTensorAtLevel(tensor1, cur_level);
16903   auto [tensor2_value, tensor2_bdim] = unwrapTensorAtLevel(tensor2, cur_level);
16904   batch_rule(self_value, self_bdim, tensor1_value, tensor1_bdim, tensor2_value, tensor2_bdim, value);
16905   return self;
16906 }
16907 template <typename batch_rule_t, batch_rule_t batch_rule>
16908 at::Tensor addcdiv_generated_plumbing(const at::Tensor & self, const at::Tensor & tensor1, const at::Tensor & tensor2, const at::Scalar & value) {
16909   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
16910   auto maybe_layer = maybeCurrentDynamicLayer();
16911   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
16912   int64_t cur_level = maybe_layer->layerId();
16913   if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(tensor1, cur_level) && !isBatchedAtLevel(tensor2, cur_level)) {
16914     return at::_ops::addcdiv::call(self, tensor1, tensor2, value);
16915   }
16916   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
16917   auto [tensor1_value, tensor1_bdim] = unwrapTensorAtLevel(tensor1, cur_level);
16918   auto [tensor2_value, tensor2_bdim] = unwrapTensorAtLevel(tensor2, cur_level);
16919   auto results = batch_rule(self_value, self_bdim, tensor1_value, tensor1_bdim, tensor2_value, tensor2_bdim, value);
16920   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
16921 }
16922 template <typename batch_rule_t, batch_rule_t batch_rule>
16923 at::Tensor & addcdiv__generated_plumbing(at::Tensor & self, const at::Tensor & tensor1, const at::Tensor & tensor2, const at::Scalar & value) {
16924   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
16925   auto maybe_layer = maybeCurrentDynamicLayer();
16926   vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
16927   int64_t cur_level = maybe_layer->layerId();
16928   if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(tensor1, cur_level) && !isBatchedAtLevel(tensor2, cur_level)) {
16929     return at::_ops::addcdiv_::call(self, tensor1, tensor2, value);
16930   }
16931   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
16932   auto [tensor1_value, tensor1_bdim] = unwrapTensorAtLevel(tensor1, cur_level);
16933   auto [tensor2_value, tensor2_bdim] = unwrapTensorAtLevel(tensor2, cur_level);
16934   batch_rule(self_value, self_bdim, tensor1_value, tensor1_bdim, tensor2_value, tensor2_bdim, value);
16935   return self;
16936 }
16937 template <typename batch_rule_t, batch_rule_t batch_rule>
16938 at::Tensor cross_entropy_loss_generated_plumbing(const at::Tensor & self, const at::Tensor & target, const ::std::optional<at::Tensor> & weight, int64_t reduction, c10::SymInt ignore_index, double label_smoothing) {
16939   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
16940   auto maybe_layer = maybeCurrentDynamicLayer();
16941   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
16942   int64_t cur_level = maybe_layer->layerId();
16943   if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(target, cur_level) && !isBatchedAtLevel(weight, cur_level)) {
16944     return at::_ops::cross_entropy_loss::call(self, target, weight, reduction, ignore_index, label_smoothing);
16945   }
16946   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
16947   auto [target_value, target_bdim] = unwrapTensorAtLevel(target, cur_level);
16948   std::optional<Tensor> weight_value;
16949   std::optional<int64_t> weight_bdim;
16950   if (weight) {
16951       std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight.value(), cur_level);
16952   }
16953   auto results = batch_rule(self_value, self_bdim, target_value, target_bdim, weight_value, weight_bdim, reduction, ignore_index, label_smoothing);
16954   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
16955 }
16956 template <typename batch_rule_t, batch_rule_t batch_rule>
16957 ::std::tuple<at::Tensor,at::Tensor> triangular_solve_generated_plumbing(const at::Tensor & self, const at::Tensor & A, bool upper, bool transpose, bool unitriangular) {
16958   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
16959   auto maybe_layer = maybeCurrentDynamicLayer();
16960   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
16961   int64_t cur_level = maybe_layer->layerId();
16962   if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(A, cur_level)) {
16963     return at::_ops::triangular_solve::call(self, A, upper, transpose, unitriangular);
16964   }
16965   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
16966   auto [A_value, A_bdim] = unwrapTensorAtLevel(A, cur_level);
16967   auto results = batch_rule(self_value, self_bdim, A_value, A_bdim, upper, transpose, unitriangular);
16968   return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
16969 }
16970 template <typename batch_rule_t, batch_rule_t batch_rule>
16971 void _linalg_check_errors_generated_plumbing(const at::Tensor & info, c10::string_view api_name, bool is_matrix) {
16972   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
16973   auto maybe_layer = maybeCurrentDynamicLayer();
16974   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
16975   int64_t cur_level = maybe_layer->layerId();
16976   if (!isBatchedAtLevel(info, cur_level)) {
16977     return at::_ops::_linalg_check_errors::call(info, api_name, is_matrix);
16978   }
16979   auto [info_value, info_bdim] = unwrapTensorAtLevel(info, cur_level);
16980   batch_rule(info_value, info_bdim, api_name, is_matrix);
16981 }
16982 template <typename batch_rule_t, batch_rule_t batch_rule>
16983 at::Tensor linalg_solve_triangular_generated_plumbing(const at::Tensor & self, const at::Tensor & B, bool upper, bool left, bool unitriangular) {
16984   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
16985   auto maybe_layer = maybeCurrentDynamicLayer();
16986   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
16987   int64_t cur_level = maybe_layer->layerId();
16988   if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(B, cur_level)) {
16989     return at::_ops::linalg_solve_triangular::call(self, B, upper, left, unitriangular);
16990   }
16991   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
16992   auto [B_value, B_bdim] = unwrapTensorAtLevel(B, cur_level);
16993   auto results = batch_rule(self_value, self_bdim, B_value, B_bdim, upper, left, unitriangular);
16994   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
16995 }
16996 template <typename batch_rule_t, batch_rule_t batch_rule>
16997 at::Tensor linalg_vander_generated_plumbing(const at::Tensor & x, ::std::optional<c10::SymInt> N) {
16998   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
16999   auto maybe_layer = maybeCurrentDynamicLayer();
17000   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
17001   int64_t cur_level = maybe_layer->layerId();
17002   if (!isBatchedAtLevel(x, cur_level)) {
17003     return at::_ops::linalg_vander::call(x, N);
17004   }
17005   auto [x_value, x_bdim] = unwrapTensorAtLevel(x, cur_level);
17006   auto results = batch_rule(x_value, x_bdim, N);
17007   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
17008 }
17009 template <typename batch_rule_t, batch_rule_t batch_rule>
17010 ::std::tuple<at::Tensor,at::Tensor,at::Tensor> svd_generated_plumbing(const at::Tensor & self, bool some, bool compute_uv) {
17011   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
17012   auto maybe_layer = maybeCurrentDynamicLayer();
17013   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
17014   int64_t cur_level = maybe_layer->layerId();
17015   if (!isBatchedAtLevel(self, cur_level)) {
17016     return at::_ops::svd::call(self, some, compute_uv);
17017   }
17018   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
17019   auto results = batch_rule(self_value, self_bdim, some, compute_uv);
17020   return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level));
17021 }
17022 template <typename batch_rule_t, batch_rule_t batch_rule>
17023 at::Tensor swapaxes_generated_plumbing(const at::Tensor & self, int64_t axis0, int64_t axis1) {
17024   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
17025   auto maybe_layer = maybeCurrentDynamicLayer();
17026   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
17027   int64_t cur_level = maybe_layer->layerId();
17028   if (!isBatchedAtLevel(self, cur_level)) {
17029     return at::_ops::swapaxes::call(self, axis0, axis1);
17030   }
17031   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
17032   auto results = batch_rule(self_value, self_bdim, axis0, axis1);
17033   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
17034 }
17035 template <typename batch_rule_t, batch_rule_t batch_rule>
17036 at::Tensor swapdims_generated_plumbing(const at::Tensor & self, int64_t dim0, int64_t dim1) {
17037   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
17038   auto maybe_layer = maybeCurrentDynamicLayer();
17039   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
17040   int64_t cur_level = maybe_layer->layerId();
17041   if (!isBatchedAtLevel(self, cur_level)) {
17042     return at::_ops::swapdims::call(self, dim0, dim1);
17043   }
17044   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
17045   auto results = batch_rule(self_value, self_bdim, dim0, dim1);
17046   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
17047 }
17048 template <typename batch_rule_t, batch_rule_t batch_rule>
17049 at::Tensor cholesky_generated_plumbing(const at::Tensor & self, bool upper) {
17050   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
17051   auto maybe_layer = maybeCurrentDynamicLayer();
17052   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
17053   int64_t cur_level = maybe_layer->layerId();
17054   if (!isBatchedAtLevel(self, cur_level)) {
17055     return at::_ops::cholesky::call(self, upper);
17056   }
17057   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
17058   auto results = batch_rule(self_value, self_bdim, upper);
17059   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
17060 }
17061 template <typename batch_rule_t, batch_rule_t batch_rule>
17062 at::Tensor cholesky_solve_generated_plumbing(const at::Tensor & self, const at::Tensor & input2, bool upper) {
17063   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
17064   auto maybe_layer = maybeCurrentDynamicLayer();
17065   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
17066   int64_t cur_level = maybe_layer->layerId();
17067   if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(input2, cur_level)) {
17068     return at::_ops::cholesky_solve::call(self, input2, upper);
17069   }
17070   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
17071   auto [input2_value, input2_bdim] = unwrapTensorAtLevel(input2, cur_level);
17072   auto results = batch_rule(self_value, self_bdim, input2_value, input2_bdim, upper);
17073   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
17074 }
17075 template <typename batch_rule_t, batch_rule_t batch_rule>
17076 at::Tensor _cholesky_solve_helper_generated_plumbing(const at::Tensor & self, const at::Tensor & A, bool upper) {
17077   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
17078   auto maybe_layer = maybeCurrentDynamicLayer();
17079   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
17080   int64_t cur_level = maybe_layer->layerId();
17081   if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(A, cur_level)) {
17082     return at::_ops::_cholesky_solve_helper::call(self, A, upper);
17083   }
17084   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
17085   auto [A_value, A_bdim] = unwrapTensorAtLevel(A, cur_level);
17086   auto results = batch_rule(self_value, self_bdim, A_value, A_bdim, upper);
17087   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
17088 }
17089 template <typename batch_rule_t, batch_rule_t batch_rule>
17090 at::Tensor cholesky_inverse_generated_plumbing(const at::Tensor & self, bool upper) {
17091   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
17092   auto maybe_layer = maybeCurrentDynamicLayer();
17093   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
17094   int64_t cur_level = maybe_layer->layerId();
17095   if (!isBatchedAtLevel(self, cur_level)) {
17096     return at::_ops::cholesky_inverse::call(self, upper);
17097   }
17098   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
17099   auto results = batch_rule(self_value, self_bdim, upper);
17100   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
17101 }
17102 template <typename batch_rule_t, batch_rule_t batch_rule>
17103 ::std::tuple<at::Tensor,at::Tensor> qr_generated_plumbing(const at::Tensor & self, bool some) {
17104   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
17105   auto maybe_layer = maybeCurrentDynamicLayer();
17106   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
17107   int64_t cur_level = maybe_layer->layerId();
17108   if (!isBatchedAtLevel(self, cur_level)) {
17109     return at::_ops::qr::call(self, some);
17110   }
17111   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
17112   auto results = batch_rule(self_value, self_bdim, some);
17113   return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
17114 }
17115 template <typename batch_rule_t, batch_rule_t batch_rule>
17116 ::std::tuple<at::Tensor,at::Tensor> geqrf_generated_plumbing(const at::Tensor & self) {
17117   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
17118   auto maybe_layer = maybeCurrentDynamicLayer();
17119   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
17120   int64_t cur_level = maybe_layer->layerId();
17121   if (!isBatchedAtLevel(self, cur_level)) {
17122     return at::_ops::geqrf::call(self);
17123   }
17124   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
17125   auto results = batch_rule(self_value, self_bdim);
17126   return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
17127 }
17128 template <typename batch_rule_t, batch_rule_t batch_rule>
17129 at::Tensor orgqr_generated_plumbing(const at::Tensor & self, const at::Tensor & input2) {
17130   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
17131   auto maybe_layer = maybeCurrentDynamicLayer();
17132   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
17133   int64_t cur_level = maybe_layer->layerId();
17134   if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(input2, cur_level)) {
17135     return at::_ops::orgqr::call(self, input2);
17136   }
17137   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
17138   auto [input2_value, input2_bdim] = unwrapTensorAtLevel(input2, cur_level);
17139   auto results = batch_rule(self_value, self_bdim, input2_value, input2_bdim);
17140   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
17141 }
17142 template <typename batch_rule_t, batch_rule_t batch_rule>
17143 at::Tensor ormqr_generated_plumbing(const at::Tensor & self, const at::Tensor & input2, const at::Tensor & input3, bool left, bool transpose) {
17144   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
17145   auto maybe_layer = maybeCurrentDynamicLayer();
17146   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
17147   int64_t cur_level = maybe_layer->layerId();
17148   if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(input2, cur_level) && !isBatchedAtLevel(input3, cur_level)) {
17149     return at::_ops::ormqr::call(self, input2, input3, left, transpose);
17150   }
17151   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
17152   auto [input2_value, input2_bdim] = unwrapTensorAtLevel(input2, cur_level);
17153   auto [input3_value, input3_bdim] = unwrapTensorAtLevel(input3, cur_level);
17154   auto results = batch_rule(self_value, self_bdim, input2_value, input2_bdim, input3_value, input3_bdim, left, transpose);
17155   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
17156 }
17157 template <typename batch_rule_t, batch_rule_t batch_rule>
17158 ::std::tuple<at::Tensor,at::Tensor,at::Tensor> _lu_with_info_generated_plumbing(const at::Tensor & self, bool pivot, bool check_errors) {
17159   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
17160   auto maybe_layer = maybeCurrentDynamicLayer();
17161   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
17162   int64_t cur_level = maybe_layer->layerId();
17163   if (!isBatchedAtLevel(self, cur_level)) {
17164     return at::_ops::_lu_with_info::call(self, pivot, check_errors);
17165   }
17166   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
17167   auto results = batch_rule(self_value, self_bdim, pivot, check_errors);
17168   return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level));
17169 }
17170 template <typename batch_rule_t, batch_rule_t batch_rule>
17171 at::Tensor lu_solve_generated_plumbing(const at::Tensor & self, const at::Tensor & LU_data, const at::Tensor & LU_pivots) {
17172   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
17173   auto maybe_layer = maybeCurrentDynamicLayer();
17174   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
17175   int64_t cur_level = maybe_layer->layerId();
17176   if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(LU_data, cur_level) && !isBatchedAtLevel(LU_pivots, cur_level)) {
17177     return at::_ops::lu_solve::call(self, LU_data, LU_pivots);
17178   }
17179   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
17180   auto [LU_data_value, LU_data_bdim] = unwrapTensorAtLevel(LU_data, cur_level);
17181   auto [LU_pivots_value, LU_pivots_bdim] = unwrapTensorAtLevel(LU_pivots, cur_level);
17182   auto results = batch_rule(self_value, self_bdim, LU_data_value, LU_data_bdim, LU_pivots_value, LU_pivots_bdim);
17183   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
17184 }
17185 template <typename batch_rule_t, batch_rule_t batch_rule>
17186 ::std::tuple<at::Tensor,at::Tensor,at::Tensor> lu_unpack_generated_plumbing(const at::Tensor & LU_data, const at::Tensor & LU_pivots, bool unpack_data, bool unpack_pivots) {
17187   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
17188   auto maybe_layer = maybeCurrentDynamicLayer();
17189   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
17190   int64_t cur_level = maybe_layer->layerId();
17191   if (!isBatchedAtLevel(LU_data, cur_level) && !isBatchedAtLevel(LU_pivots, cur_level)) {
17192     return at::_ops::lu_unpack::call(LU_data, LU_pivots, unpack_data, unpack_pivots);
17193   }
17194   auto [LU_data_value, LU_data_bdim] = unwrapTensorAtLevel(LU_data, cur_level);
17195   auto [LU_pivots_value, LU_pivots_bdim] = unwrapTensorAtLevel(LU_pivots, cur_level);
17196   auto results = batch_rule(LU_data_value, LU_data_bdim, LU_pivots_value, LU_pivots_bdim, unpack_data, unpack_pivots);
17197   return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level));
17198 }
17199 template <typename batch_rule_t, batch_rule_t batch_rule>
17200 at::Tensor multinomial_generated_plumbing(const at::Tensor & self, c10::SymInt num_samples, bool replacement, ::std::optional<at::Generator> generator) {
17201   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
17202   auto maybe_layer = maybeCurrentDynamicLayer();
17203   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
17204   int64_t cur_level = maybe_layer->layerId();
17205   if (!isBatchedAtLevel(self, cur_level)) {
17206     return at::_ops::multinomial::call(self, num_samples, replacement, generator);
17207   }
17208   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
17209   auto results = batch_rule(self_value, self_bdim, num_samples, replacement, generator);
17210   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
17211 }
17212 template <typename batch_rule_t, batch_rule_t batch_rule>
17213 at::Tensor & lgamma__generated_plumbing(at::Tensor & self) {
17214   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
17215   auto maybe_layer = maybeCurrentDynamicLayer();
17216   vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
17217   int64_t cur_level = maybe_layer->layerId();
17218   if (!isBatchedAtLevel(self, cur_level)) {
17219     return at::_ops::lgamma_::call(self);
17220   }
17221   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
17222   batch_rule(self_value, self_bdim);
17223   return self;
17224 }
17225 template <typename batch_rule_t, batch_rule_t batch_rule>
17226 at::Tensor lgamma_generated_plumbing(const at::Tensor & self) {
17227   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
17228   auto maybe_layer = maybeCurrentDynamicLayer();
17229   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
17230   int64_t cur_level = maybe_layer->layerId();
17231   if (!isBatchedAtLevel(self, cur_level)) {
17232     return at::_ops::lgamma::call(self);
17233   }
17234   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
17235   auto results = batch_rule(self_value, self_bdim);
17236   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
17237 }
17238 template <typename batch_rule_t, batch_rule_t batch_rule>
17239 at::Tensor digamma_generated_plumbing(const at::Tensor & self) {
17240   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
17241   auto maybe_layer = maybeCurrentDynamicLayer();
17242   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
17243   int64_t cur_level = maybe_layer->layerId();
17244   if (!isBatchedAtLevel(self, cur_level)) {
17245     return at::_ops::digamma::call(self);
17246   }
17247   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
17248   auto results = batch_rule(self_value, self_bdim);
17249   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
17250 }
17251 template <typename batch_rule_t, batch_rule_t batch_rule>
17252 at::Tensor polygamma_generated_plumbing(int64_t n, const at::Tensor & self) {
17253   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
17254   auto maybe_layer = maybeCurrentDynamicLayer();
17255   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
17256   int64_t cur_level = maybe_layer->layerId();
17257   if (!isBatchedAtLevel(self, cur_level)) {
17258     return at::_ops::polygamma::call(n, self);
17259   }
17260   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
17261   auto results = batch_rule(n, self_value, self_bdim);
17262   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
17263 }
17264 template <typename batch_rule_t, batch_rule_t batch_rule>
17265 at::Tensor & polygamma__generated_plumbing(at::Tensor & self, int64_t n) {
17266   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
17267   auto maybe_layer = maybeCurrentDynamicLayer();
17268   vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
17269   int64_t cur_level = maybe_layer->layerId();
17270   if (!isBatchedAtLevel(self, cur_level)) {
17271     return at::_ops::polygamma_::call(self, n);
17272   }
17273   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
17274   batch_rule(self_value, self_bdim, n);
17275   return self;
17276 }
17277 template <typename batch_rule_t, batch_rule_t batch_rule>
17278 at::Tensor erfinv_generated_plumbing(const at::Tensor & self) {
17279   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
17280   auto maybe_layer = maybeCurrentDynamicLayer();
17281   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
17282   int64_t cur_level = maybe_layer->layerId();
17283   if (!isBatchedAtLevel(self, cur_level)) {
17284     return at::_ops::erfinv::call(self);
17285   }
17286   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
17287   auto results = batch_rule(self_value, self_bdim);
17288   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
17289 }
17290 template <typename batch_rule_t, batch_rule_t batch_rule>
17291 at::Tensor & erfinv__generated_plumbing(at::Tensor & self) {
17292   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
17293   auto maybe_layer = maybeCurrentDynamicLayer();
17294   vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
17295   int64_t cur_level = maybe_layer->layerId();
17296   if (!isBatchedAtLevel(self, cur_level)) {
17297     return at::_ops::erfinv_::call(self);
17298   }
17299   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
17300   batch_rule(self_value, self_bdim);
17301   return self;
17302 }
17303 template <typename batch_rule_t, batch_rule_t batch_rule>
17304 at::Tensor i0_generated_plumbing(const at::Tensor & self) {
17305   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
17306   auto maybe_layer = maybeCurrentDynamicLayer();
17307   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
17308   int64_t cur_level = maybe_layer->layerId();
17309   if (!isBatchedAtLevel(self, cur_level)) {
17310     return at::_ops::i0::call(self);
17311   }
17312   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
17313   auto results = batch_rule(self_value, self_bdim);
17314   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
17315 }
17316 template <typename batch_rule_t, batch_rule_t batch_rule>
17317 at::Tensor & i0__generated_plumbing(at::Tensor & self) {
17318   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
17319   auto maybe_layer = maybeCurrentDynamicLayer();
17320   vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
17321   int64_t cur_level = maybe_layer->layerId();
17322   if (!isBatchedAtLevel(self, cur_level)) {
17323     return at::_ops::i0_::call(self);
17324   }
17325   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
17326   batch_rule(self_value, self_bdim);
17327   return self;
17328 }
17329 template <typename batch_rule_t, batch_rule_t batch_rule>
17330 at::Tensor sign_generated_plumbing(const at::Tensor & self) {
17331   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
17332   auto maybe_layer = maybeCurrentDynamicLayer();
17333   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
17334   int64_t cur_level = maybe_layer->layerId();
17335   if (!isBatchedAtLevel(self, cur_level)) {
17336     return at::_ops::sign::call(self);
17337   }
17338   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
17339   auto results = batch_rule(self_value, self_bdim);
17340   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
17341 }
17342 template <typename batch_rule_t, batch_rule_t batch_rule>
17343 at::Tensor & sign__generated_plumbing(at::Tensor & self) {
17344   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
17345   auto maybe_layer = maybeCurrentDynamicLayer();
17346   vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
17347   int64_t cur_level = maybe_layer->layerId();
17348   if (!isBatchedAtLevel(self, cur_level)) {
17349     return at::_ops::sign_::call(self);
17350   }
17351   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
17352   batch_rule(self_value, self_bdim);
17353   return self;
17354 }
17355 template <typename batch_rule_t, batch_rule_t batch_rule>
17356 at::Tensor signbit_generated_plumbing(const at::Tensor & self) {
17357   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
17358   auto maybe_layer = maybeCurrentDynamicLayer();
17359   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
17360   int64_t cur_level = maybe_layer->layerId();
17361   if (!isBatchedAtLevel(self, cur_level)) {
17362     return at::_ops::signbit::call(self);
17363   }
17364   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
17365   auto results = batch_rule(self_value, self_bdim);
17366   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
17367 }
17368 template <typename batch_rule_t, batch_rule_t batch_rule>
17369 at::Tensor dist_generated_plumbing(const at::Tensor & self, const at::Tensor & other, const at::Scalar & p) {
17370   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
17371   auto maybe_layer = maybeCurrentDynamicLayer();
17372   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
17373   int64_t cur_level = maybe_layer->layerId();
17374   if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
17375     return at::_ops::dist::call(self, other, p);
17376   }
17377   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
17378   auto [other_value, other_bdim] = unwrapTensorAtLevel(other, cur_level);
17379   auto results = batch_rule(self_value, self_bdim, other_value, other_bdim, p);
17380   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
17381 }
17382 template <typename batch_rule_t, batch_rule_t batch_rule>
17383 at::Tensor & atan2__generated_plumbing(at::Tensor & self, const at::Tensor & other) {
17384   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
17385   auto maybe_layer = maybeCurrentDynamicLayer();
17386   vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
17387   int64_t cur_level = maybe_layer->layerId();
17388   if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
17389     return at::_ops::atan2_::call(self, other);
17390   }
17391   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
17392   auto [other_value, other_bdim] = unwrapTensorAtLevel(other, cur_level);
17393   batch_rule(self_value, self_bdim, other_value, other_bdim);
17394   return self;
17395 }
17396 template <typename batch_rule_t, batch_rule_t batch_rule>
17397 at::Tensor atan2_generated_plumbing(const at::Tensor & self, const at::Tensor & other) {
17398   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
17399   auto maybe_layer = maybeCurrentDynamicLayer();
17400   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
17401   int64_t cur_level = maybe_layer->layerId();
17402   if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
17403     return at::_ops::atan2::call(self, other);
17404   }
17405   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
17406   auto [other_value, other_bdim] = unwrapTensorAtLevel(other, cur_level);
17407   auto results = batch_rule(self_value, self_bdim, other_value, other_bdim);
17408   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
17409 }
17410 template <typename batch_rule_t, batch_rule_t batch_rule>
17411 at::Tensor arctan2_generated_plumbing(const at::Tensor & self, const at::Tensor & other) {
17412   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
17413   auto maybe_layer = maybeCurrentDynamicLayer();
17414   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
17415   int64_t cur_level = maybe_layer->layerId();
17416   if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
17417     return at::_ops::arctan2::call(self, other);
17418   }
17419   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
17420   auto [other_value, other_bdim] = unwrapTensorAtLevel(other, cur_level);
17421   auto results = batch_rule(self_value, self_bdim, other_value, other_bdim);
17422   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
17423 }
17424 template <typename batch_rule_t, batch_rule_t batch_rule>
17425 at::Tensor & arctan2__generated_plumbing(at::Tensor & self, const at::Tensor & other) {
17426   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
17427   auto maybe_layer = maybeCurrentDynamicLayer();
17428   vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
17429   int64_t cur_level = maybe_layer->layerId();
17430   if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
17431     return at::_ops::arctan2_::call(self, other);
17432   }
17433   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
17434   auto [other_value, other_bdim] = unwrapTensorAtLevel(other, cur_level);
17435   batch_rule(self_value, self_bdim, other_value, other_bdim);
17436   return self;
17437 }
17438 template <typename batch_rule_t, batch_rule_t batch_rule>
17439 at::Tensor lerp_Scalar_generated_plumbing(const at::Tensor & self, const at::Tensor & end, const at::Scalar & weight) {
17440   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
17441   auto maybe_layer = maybeCurrentDynamicLayer();
17442   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
17443   int64_t cur_level = maybe_layer->layerId();
17444   if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(end, cur_level)) {
17445     return at::_ops::lerp_Scalar::call(self, end, weight);
17446   }
17447   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
17448   auto [end_value, end_bdim] = unwrapTensorAtLevel(end, cur_level);
17449   auto results = batch_rule(self_value, self_bdim, end_value, end_bdim, weight);
17450   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
17451 }
17452 template <typename batch_rule_t, batch_rule_t batch_rule>
17453 at::Tensor lerp_Tensor_generated_plumbing(const at::Tensor & self, const at::Tensor & end, const at::Tensor & weight) {
17454   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
17455   auto maybe_layer = maybeCurrentDynamicLayer();
17456   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
17457   int64_t cur_level = maybe_layer->layerId();
17458   if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(end, cur_level) && !isBatchedAtLevel(weight, cur_level)) {
17459     return at::_ops::lerp_Tensor::call(self, end, weight);
17460   }
17461   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
17462   auto [end_value, end_bdim] = unwrapTensorAtLevel(end, cur_level);
17463   auto [weight_value, weight_bdim] = unwrapTensorAtLevel(weight, cur_level);
17464   auto results = batch_rule(self_value, self_bdim, end_value, end_bdim, weight_value, weight_bdim);
17465   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
17466 }
17467 template <typename batch_rule_t, batch_rule_t batch_rule>
17468 at::Tensor histc_generated_plumbing(const at::Tensor & self, int64_t bins, const at::Scalar & min, const at::Scalar & max) {
17469   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
17470   auto maybe_layer = maybeCurrentDynamicLayer();
17471   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
17472   int64_t cur_level = maybe_layer->layerId();
17473   if (!isBatchedAtLevel(self, cur_level)) {
17474     return at::_ops::histc::call(self, bins, min, max);
17475   }
17476   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
17477   auto results = batch_rule(self_value, self_bdim, bins, min, max);
17478   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
17479 }
template <typename batch_rule_t, batch_rule_t batch_rule>
::std::tuple<at::Tensor,at::Tensor> histogram_bins_tensor_generated_plumbing(const at::Tensor & self, const at::Tensor & bins, const ::std::optional<at::Tensor> & weight, bool density) {
  // vmap plumbing for histogram.bins_tensor: unwraps `self`, `bins`, and the
  // optional `weight` at the current vmap level, invokes the batch rule, and
  // re-wraps both returned tensors as batched tensors.
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  // Fast path: no input is batched at this level, so call the plain op.
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(bins, cur_level) && !isBatchedAtLevel(weight, cur_level)) {
    return at::_ops::histogram_bins_tensor::call(self, bins, weight, density);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto [bins_value, bins_bdim] = unwrapTensorAtLevel(bins, cur_level);
  // The optional weight is unwrapped only when present; otherwise the
  // value/batch-dim pair stays empty.
  std::optional<Tensor> weight_value;
  std::optional<int64_t> weight_bdim;
  if (weight) {
      std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight.value(), cur_level);
  }
  auto results = batch_rule(self_value, self_bdim, bins_value, bins_bdim, weight_value, weight_bdim, density);
  return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
}
template <typename batch_rule_t, batch_rule_t batch_rule>
::std::tuple<at::Tensor,at::Tensor> histogram_bin_ct_generated_plumbing(const at::Tensor & self, int64_t bins, ::std::optional<at::ArrayRef<double>> range, const ::std::optional<at::Tensor> & weight, bool density) {
  // vmap plumbing for histogram.bin_ct: unwraps `self` and the optional
  // `weight` at the current vmap level, invokes the batch rule, and re-wraps
  // both returned tensors as batched tensors.
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  // Fast path: no input is batched at this level, so call the plain op.
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(weight, cur_level)) {
    return at::_ops::histogram_bin_ct::call(self, bins, range, weight, density);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  // The optional weight is unwrapped only when present; otherwise the
  // value/batch-dim pair stays empty.
  std::optional<Tensor> weight_value;
  std::optional<int64_t> weight_bdim;
  if (weight) {
      std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight.value(), cur_level);
  }
  auto results = batch_rule(self_value, self_bdim, bins, range, weight_value, weight_bdim, density);
  return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
}
template <typename batch_rule_t, batch_rule_t batch_rule>
::std::vector<at::Tensor> _histogramdd_bin_edges_generated_plumbing(const at::Tensor & self, at::IntArrayRef bins, ::std::optional<at::ArrayRef<double>> range, const ::std::optional<at::Tensor> & weight, bool density) {
  // vmap plumbing for _histogramdd_bin_edges: unwraps `self` and the optional
  // `weight` at the current vmap level, invokes the batch rule, and wraps the
  // returned vector of tensors via makeBatchedVector.
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  // Fast path: no input is batched at this level, so call the plain op.
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(weight, cur_level)) {
    return at::_ops::_histogramdd_bin_edges::call(self, bins, range, weight, density);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  // The optional weight is unwrapped only when present; otherwise the
  // value/batch-dim pair stays empty.
  std::optional<Tensor> weight_value;
  std::optional<int64_t> weight_bdim;
  if (weight) {
      std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight.value(), cur_level);
  }
  auto results = batch_rule(self_value, self_bdim, bins, range, weight_value, weight_bdim, density);
  return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor _histogramdd_from_bin_cts_generated_plumbing(const at::Tensor & self, at::IntArrayRef bins, ::std::optional<at::ArrayRef<double>> range, const ::std::optional<at::Tensor> & weight, bool density) {
  // vmap plumbing for _histogramdd_from_bin_cts: unwraps `self` and the
  // optional `weight` at the current vmap level, invokes the batch rule, and
  // re-wraps the single tensor result.
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  // Fast path: no input is batched at this level, so call the plain op.
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(weight, cur_level)) {
    return at::_ops::_histogramdd_from_bin_cts::call(self, bins, range, weight, density);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  // The optional weight is unwrapped only when present; otherwise the
  // value/batch-dim pair stays empty.
  std::optional<Tensor> weight_value;
  std::optional<int64_t> weight_bdim;
  if (weight) {
      std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight.value(), cur_level);
  }
  auto results = batch_rule(self_value, self_bdim, bins, range, weight_value, weight_bdim, density);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor _histogramdd_from_bin_tensors_generated_plumbing(const at::Tensor & self, at::TensorList bins, const ::std::optional<at::Tensor> & weight, bool density) {
  // vmap plumbing for _histogramdd_from_bin_tensors: unwraps `self` and the
  // optional `weight` at the current vmap level, invokes the batch rule, and
  // re-wraps the single tensor result. Note that `bins` (a TensorList) is
  // passed through to the batch rule without unwrapping.
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  // Fast path: no input is batched at this level, so call the plain op.
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(bins, cur_level) && !isBatchedAtLevel(weight, cur_level)) {
    return at::_ops::_histogramdd_from_bin_tensors::call(self, bins, weight, density);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  // The optional weight is unwrapped only when present; otherwise the
  // value/batch-dim pair stays empty.
  std::optional<Tensor> weight_value;
  std::optional<int64_t> weight_bdim;
  if (weight) {
      std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight.value(), cur_level);
  }
  auto results = batch_rule(self_value, self_bdim, bins, weight_value, weight_bdim, density);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
::std::tuple<at::Tensor,::std::vector<at::Tensor>> histogramdd_generated_plumbing(const at::Tensor & self, at::IntArrayRef bins, ::std::optional<at::ArrayRef<double>> range, const ::std::optional<at::Tensor> & weight, bool density) {
  // vmap plumbing for histogramdd: unwraps `self` and the optional `weight`
  // at the current vmap level, invokes the batch rule, then re-wraps the
  // tensor result with makeBatched and the vector result with
  // makeBatchedVector.
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  // Fast path: no input is batched at this level, so call the plain op.
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(weight, cur_level)) {
    return at::_ops::histogramdd::call(self, bins, range, weight, density);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  // The optional weight is unwrapped only when present; otherwise the
  // value/batch-dim pair stays empty.
  std::optional<Tensor> weight_value;
  std::optional<int64_t> weight_bdim;
  if (weight) {
      std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight.value(), cur_level);
  }
  auto results = batch_rule(self_value, self_bdim, bins, range, weight_value, weight_bdim, density);
  return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatchedVector(std::get<2>(results), std::get<3>(results), cur_level));
}
template <typename batch_rule_t, batch_rule_t batch_rule>
::std::tuple<at::Tensor,::std::vector<at::Tensor>> histogramdd_int_bins_generated_plumbing(const at::Tensor & self, int64_t bins, ::std::optional<at::ArrayRef<double>> range, const ::std::optional<at::Tensor> & weight, bool density) {
  // vmap plumbing for histogramdd.int_bins: unwraps `self` and the optional
  // `weight` at the current vmap level, invokes the batch rule, then re-wraps
  // the tensor result and the vector result.
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  // Fast path: no input is batched at this level, so call the plain op.
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(weight, cur_level)) {
    return at::_ops::histogramdd_int_bins::call(self, bins, range, weight, density);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  // The optional weight is unwrapped only when present; otherwise the
  // value/batch-dim pair stays empty.
  std::optional<Tensor> weight_value;
  std::optional<int64_t> weight_bdim;
  if (weight) {
      std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight.value(), cur_level);
  }
  auto results = batch_rule(self_value, self_bdim, bins, range, weight_value, weight_bdim, density);
  return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatchedVector(std::get<2>(results), std::get<3>(results), cur_level));
}
template <typename batch_rule_t, batch_rule_t batch_rule>
::std::tuple<at::Tensor,::std::vector<at::Tensor>> histogramdd_TensorList_bins_generated_plumbing(const at::Tensor & self, at::TensorList bins, ::std::optional<at::ArrayRef<double>> range, const ::std::optional<at::Tensor> & weight, bool density) {
  // vmap plumbing for histogramdd.TensorList_bins: unwraps `self` and the
  // optional `weight` at the current vmap level, invokes the batch rule, then
  // re-wraps the tensor result and the vector result. `bins` (a TensorList)
  // is passed through to the batch rule without unwrapping.
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  // Fast path: no input is batched at this level, so call the plain op.
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(bins, cur_level) && !isBatchedAtLevel(weight, cur_level)) {
    return at::_ops::histogramdd_TensorList_bins::call(self, bins, range, weight, density);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  // The optional weight is unwrapped only when present; otherwise the
  // value/batch-dim pair stays empty.
  std::optional<Tensor> weight_value;
  std::optional<int64_t> weight_bdim;
  if (weight) {
      std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight.value(), cur_level);
  }
  auto results = batch_rule(self_value, self_bdim, bins, range, weight_value, weight_bdim, density);
  return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatchedVector(std::get<2>(results), std::get<3>(results), cur_level));
}
17625 template <typename batch_rule_t, batch_rule_t batch_rule>
17626 at::Tensor fmod_Scalar_generated_plumbing(const at::Tensor & self, const at::Scalar & other) {
17627   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
17628   auto maybe_layer = maybeCurrentDynamicLayer();
17629   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
17630   int64_t cur_level = maybe_layer->layerId();
17631   if (!isBatchedAtLevel(self, cur_level)) {
17632     return at::_ops::fmod_Scalar::call(self, other);
17633   }
17634   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
17635   auto results = batch_rule(self_value, self_bdim, other);
17636   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
17637 }
17638 template <typename batch_rule_t, batch_rule_t batch_rule>
17639 at::Tensor & fmod__Scalar_generated_plumbing(at::Tensor & self, const at::Scalar & other) {
17640   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
17641   auto maybe_layer = maybeCurrentDynamicLayer();
17642   vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
17643   int64_t cur_level = maybe_layer->layerId();
17644   if (!isBatchedAtLevel(self, cur_level)) {
17645     return at::_ops::fmod__Scalar::call(self, other);
17646   }
17647   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
17648   batch_rule(self_value, self_bdim, other);
17649   return self;
17650 }
17651 template <typename batch_rule_t, batch_rule_t batch_rule>
17652 at::Tensor fmod_Tensor_generated_plumbing(const at::Tensor & self, const at::Tensor & other) {
17653   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
17654   auto maybe_layer = maybeCurrentDynamicLayer();
17655   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
17656   int64_t cur_level = maybe_layer->layerId();
17657   if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
17658     return at::_ops::fmod_Tensor::call(self, other);
17659   }
17660   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
17661   auto [other_value, other_bdim] = unwrapTensorAtLevel(other, cur_level);
17662   auto results = batch_rule(self_value, self_bdim, other_value, other_bdim);
17663   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
17664 }
17665 template <typename batch_rule_t, batch_rule_t batch_rule>
17666 at::Tensor & fmod__Tensor_generated_plumbing(at::Tensor & self, const at::Tensor & other) {
17667   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
17668   auto maybe_layer = maybeCurrentDynamicLayer();
17669   vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
17670   int64_t cur_level = maybe_layer->layerId();
17671   if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
17672     return at::_ops::fmod__Tensor::call(self, other);
17673   }
17674   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
17675   auto [other_value, other_bdim] = unwrapTensorAtLevel(other, cur_level);
17676   batch_rule(self_value, self_bdim, other_value, other_bdim);
17677   return self;
17678 }
17679 template <typename batch_rule_t, batch_rule_t batch_rule>
17680 at::Tensor hypot_generated_plumbing(const at::Tensor & self, const at::Tensor & other) {
17681   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
17682   auto maybe_layer = maybeCurrentDynamicLayer();
17683   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
17684   int64_t cur_level = maybe_layer->layerId();
17685   if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
17686     return at::_ops::hypot::call(self, other);
17687   }
17688   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
17689   auto [other_value, other_bdim] = unwrapTensorAtLevel(other, cur_level);
17690   auto results = batch_rule(self_value, self_bdim, other_value, other_bdim);
17691   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
17692 }
17693 template <typename batch_rule_t, batch_rule_t batch_rule>
17694 at::Tensor & hypot__generated_plumbing(at::Tensor & self, const at::Tensor & other) {
17695   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
17696   auto maybe_layer = maybeCurrentDynamicLayer();
17697   vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
17698   int64_t cur_level = maybe_layer->layerId();
17699   if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
17700     return at::_ops::hypot_::call(self, other);
17701   }
17702   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
17703   auto [other_value, other_bdim] = unwrapTensorAtLevel(other, cur_level);
17704   batch_rule(self_value, self_bdim, other_value, other_bdim);
17705   return self;
17706 }
17707 template <typename batch_rule_t, batch_rule_t batch_rule>
17708 at::Tensor igamma_generated_plumbing(const at::Tensor & self, const at::Tensor & other) {
17709   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
17710   auto maybe_layer = maybeCurrentDynamicLayer();
17711   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
17712   int64_t cur_level = maybe_layer->layerId();
17713   if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
17714     return at::_ops::igamma::call(self, other);
17715   }
17716   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
17717   auto [other_value, other_bdim] = unwrapTensorAtLevel(other, cur_level);
17718   auto results = batch_rule(self_value, self_bdim, other_value, other_bdim);
17719   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
17720 }
17721 template <typename batch_rule_t, batch_rule_t batch_rule>
17722 at::Tensor & igamma__generated_plumbing(at::Tensor & self, const at::Tensor & other) {
17723   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
17724   auto maybe_layer = maybeCurrentDynamicLayer();
17725   vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
17726   int64_t cur_level = maybe_layer->layerId();
17727   if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
17728     return at::_ops::igamma_::call(self, other);
17729   }
17730   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
17731   auto [other_value, other_bdim] = unwrapTensorAtLevel(other, cur_level);
17732   batch_rule(self_value, self_bdim, other_value, other_bdim);
17733   return self;
17734 }
17735 template <typename batch_rule_t, batch_rule_t batch_rule>
17736 at::Tensor igammac_generated_plumbing(const at::Tensor & self, const at::Tensor & other) {
17737   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
17738   auto maybe_layer = maybeCurrentDynamicLayer();
17739   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
17740   int64_t cur_level = maybe_layer->layerId();
17741   if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
17742     return at::_ops::igammac::call(self, other);
17743   }
17744   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
17745   auto [other_value, other_bdim] = unwrapTensorAtLevel(other, cur_level);
17746   auto results = batch_rule(self_value, self_bdim, other_value, other_bdim);
17747   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
17748 }
17749 template <typename batch_rule_t, batch_rule_t batch_rule>
17750 at::Tensor & igammac__generated_plumbing(at::Tensor & self, const at::Tensor & other) {
17751   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
17752   auto maybe_layer = maybeCurrentDynamicLayer();
17753   vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
17754   int64_t cur_level = maybe_layer->layerId();
17755   if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
17756     return at::_ops::igammac_::call(self, other);
17757   }
17758   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
17759   auto [other_value, other_bdim] = unwrapTensorAtLevel(other, cur_level);
17760   batch_rule(self_value, self_bdim, other_value, other_bdim);
17761   return self;
17762 }
17763 template <typename batch_rule_t, batch_rule_t batch_rule>
17764 at::Tensor nextafter_generated_plumbing(const at::Tensor & self, const at::Tensor & other) {
17765   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
17766   auto maybe_layer = maybeCurrentDynamicLayer();
17767   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
17768   int64_t cur_level = maybe_layer->layerId();
17769   if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
17770     return at::_ops::nextafter::call(self, other);
17771   }
17772   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
17773   auto [other_value, other_bdim] = unwrapTensorAtLevel(other, cur_level);
17774   auto results = batch_rule(self_value, self_bdim, other_value, other_bdim);
17775   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
17776 }
17777 template <typename batch_rule_t, batch_rule_t batch_rule>
17778 at::Tensor & nextafter__generated_plumbing(at::Tensor & self, const at::Tensor & other) {
17779   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
17780   auto maybe_layer = maybeCurrentDynamicLayer();
17781   vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
17782   int64_t cur_level = maybe_layer->layerId();
17783   if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
17784     return at::_ops::nextafter_::call(self, other);
17785   }
17786   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
17787   auto [other_value, other_bdim] = unwrapTensorAtLevel(other, cur_level);
17788   batch_rule(self_value, self_bdim, other_value, other_bdim);
17789   return self;
17790 }
17791 template <typename batch_rule_t, batch_rule_t batch_rule>
17792 at::Tensor remainder_Scalar_generated_plumbing(const at::Tensor & self, const at::Scalar & other) {
17793   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
17794   auto maybe_layer = maybeCurrentDynamicLayer();
17795   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
17796   int64_t cur_level = maybe_layer->layerId();
17797   if (!isBatchedAtLevel(self, cur_level)) {
17798     return at::_ops::remainder_Scalar::call(self, other);
17799   }
17800   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
17801   auto results = batch_rule(self_value, self_bdim, other);
17802   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
17803 }
17804 template <typename batch_rule_t, batch_rule_t batch_rule>
17805 at::Tensor & remainder__Scalar_generated_plumbing(at::Tensor & self, const at::Scalar & other) {
17806   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
17807   auto maybe_layer = maybeCurrentDynamicLayer();
17808   vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
17809   int64_t cur_level = maybe_layer->layerId();
17810   if (!isBatchedAtLevel(self, cur_level)) {
17811     return at::_ops::remainder__Scalar::call(self, other);
17812   }
17813   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
17814   batch_rule(self_value, self_bdim, other);
17815   return self;
17816 }
17817 template <typename batch_rule_t, batch_rule_t batch_rule>
17818 at::Tensor remainder_Tensor_generated_plumbing(const at::Tensor & self, const at::Tensor & other) {
17819   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
17820   auto maybe_layer = maybeCurrentDynamicLayer();
17821   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
17822   int64_t cur_level = maybe_layer->layerId();
17823   if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
17824     return at::_ops::remainder_Tensor::call(self, other);
17825   }
17826   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
17827   auto [other_value, other_bdim] = unwrapTensorAtLevel(other, cur_level);
17828   auto results = batch_rule(self_value, self_bdim, other_value, other_bdim);
17829   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
17830 }
17831 template <typename batch_rule_t, batch_rule_t batch_rule>
17832 at::Tensor & remainder__Tensor_generated_plumbing(at::Tensor & self, const at::Tensor & other) {
17833   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
17834   auto maybe_layer = maybeCurrentDynamicLayer();
17835   vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
17836   int64_t cur_level = maybe_layer->layerId();
17837   if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
17838     return at::_ops::remainder__Tensor::call(self, other);
17839   }
17840   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
17841   auto [other_value, other_bdim] = unwrapTensorAtLevel(other, cur_level);
17842   batch_rule(self_value, self_bdim, other_value, other_bdim);
17843   return self;
17844 }
17845 template <typename batch_rule_t, batch_rule_t batch_rule>
17846 at::Tensor remainder_Scalar_Tensor_generated_plumbing(const at::Scalar & self, const at::Tensor & other) {
17847   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
17848   auto maybe_layer = maybeCurrentDynamicLayer();
17849   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
17850   int64_t cur_level = maybe_layer->layerId();
17851   if (!isBatchedAtLevel(other, cur_level)) {
17852     return at::_ops::remainder_Scalar_Tensor::call(self, other);
17853   }
17854   auto [other_value, other_bdim] = unwrapTensorAtLevel(other, cur_level);
17855   auto results = batch_rule(self, other_value, other_bdim);
17856   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
17857 }
17858 template <typename batch_rule_t, batch_rule_t batch_rule>
17859 at::Tensor min_generated_plumbing(const at::Tensor & self) {
17860   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
17861   auto maybe_layer = maybeCurrentDynamicLayer();
17862   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
17863   int64_t cur_level = maybe_layer->layerId();
17864   if (!isBatchedAtLevel(self, cur_level)) {
17865     return at::_ops::min::call(self);
17866   }
17867   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
17868   auto results = batch_rule(self_value, self_bdim);
17869   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
17870 }
17871 template <typename batch_rule_t, batch_rule_t batch_rule>
17872 at::Tensor fmin_generated_plumbing(const at::Tensor & self, const at::Tensor & other) {
17873   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
17874   auto maybe_layer = maybeCurrentDynamicLayer();
17875   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
17876   int64_t cur_level = maybe_layer->layerId();
17877   if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
17878     return at::_ops::fmin::call(self, other);
17879   }
17880   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
17881   auto [other_value, other_bdim] = unwrapTensorAtLevel(other, cur_level);
17882   auto results = batch_rule(self_value, self_bdim, other_value, other_bdim);
17883   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
17884 }
17885 template <typename batch_rule_t, batch_rule_t batch_rule>
17886 at::Tensor max_generated_plumbing(const at::Tensor & self) {
17887   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
17888   auto maybe_layer = maybeCurrentDynamicLayer();
17889   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
17890   int64_t cur_level = maybe_layer->layerId();
17891   if (!isBatchedAtLevel(self, cur_level)) {
17892     return at::_ops::max::call(self);
17893   }
17894   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
17895   auto results = batch_rule(self_value, self_bdim);
17896   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
17897 }
17898 template <typename batch_rule_t, batch_rule_t batch_rule>
17899 at::Tensor fmax_generated_plumbing(const at::Tensor & self, const at::Tensor & other) {
17900   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
17901   auto maybe_layer = maybeCurrentDynamicLayer();
17902   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
17903   int64_t cur_level = maybe_layer->layerId();
17904   if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
17905     return at::_ops::fmax::call(self, other);
17906   }
17907   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
17908   auto [other_value, other_bdim] = unwrapTensorAtLevel(other, cur_level);
17909   auto results = batch_rule(self_value, self_bdim, other_value, other_bdim);
17910   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
17911 }
17912 template <typename batch_rule_t, batch_rule_t batch_rule>
17913 at::Tensor maximum_generated_plumbing(const at::Tensor & self, const at::Tensor & other) {
17914   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
17915   auto maybe_layer = maybeCurrentDynamicLayer();
17916   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
17917   int64_t cur_level = maybe_layer->layerId();
17918   if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
17919     return at::_ops::maximum::call(self, other);
17920   }
17921   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
17922   auto [other_value, other_bdim] = unwrapTensorAtLevel(other, cur_level);
17923   auto results = batch_rule(self_value, self_bdim, other_value, other_bdim);
17924   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
17925 }
// vmap plumbing for aten::max.other: calls the op directly when neither input
// is batched at the current vmap level; otherwise unwraps both tensors,
// invokes batch_rule, and re-wraps the result at that level.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor max_other_generated_plumbing(const at::Tensor & self, const at::Tensor & other) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
    return at::_ops::max_other::call(self, other);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto [other_value, other_bdim] = unwrapTensorAtLevel(other, cur_level);
  auto results = batch_rule(self_value, self_bdim, other_value, other_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// vmap plumbing for aten::minimum: calls the op directly when neither input is
// batched at the current vmap level; otherwise unwraps both tensors, invokes
// batch_rule, and re-wraps the result at that level.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor minimum_generated_plumbing(const at::Tensor & self, const at::Tensor & other) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
    return at::_ops::minimum::call(self, other);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto [other_value, other_bdim] = unwrapTensorAtLevel(other, cur_level);
  auto results = batch_rule(self_value, self_bdim, other_value, other_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// vmap plumbing for aten::min.other: calls the op directly when neither input
// is batched at the current vmap level; otherwise unwraps both tensors,
// invokes batch_rule, and re-wraps the result at that level.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor min_other_generated_plumbing(const at::Tensor & self, const at::Tensor & other) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
    return at::_ops::min_other::call(self, other);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto [other_value, other_bdim] = unwrapTensorAtLevel(other, cur_level);
  auto results = batch_rule(self_value, self_bdim, other_value, other_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// vmap plumbing for aten::quantile: falls through when neither `self` nor `q`
// is batched at the current level; otherwise unwraps both tensors and forwards
// the non-tensor args (dim/keepdim/interpolation) unchanged to batch_rule.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor quantile_generated_plumbing(const at::Tensor & self, const at::Tensor & q, ::std::optional<int64_t> dim, bool keepdim, c10::string_view interpolation) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(q, cur_level)) {
    return at::_ops::quantile::call(self, q, dim, keepdim, interpolation);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto [q_value, q_bdim] = unwrapTensorAtLevel(q, cur_level);
  auto results = batch_rule(self_value, self_bdim, q_value, q_bdim, dim, keepdim, interpolation);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// vmap plumbing for aten::quantile.scalar (double q): only `self` can be
// batched; falls through when it is not, otherwise unwraps it and forwards
// the scalar/flag args unchanged to batch_rule.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor quantile_scalar_generated_plumbing(const at::Tensor & self, double q, ::std::optional<int64_t> dim, bool keepdim, c10::string_view interpolation) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::quantile_scalar::call(self, q, dim, keepdim, interpolation);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, q, dim, keepdim, interpolation);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// vmap plumbing for aten::nanquantile: falls through when neither `self` nor
// `q` is batched at the current level; otherwise unwraps both tensors and
// forwards the remaining args unchanged to batch_rule.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor nanquantile_generated_plumbing(const at::Tensor & self, const at::Tensor & q, ::std::optional<int64_t> dim, bool keepdim, c10::string_view interpolation) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(q, cur_level)) {
    return at::_ops::nanquantile::call(self, q, dim, keepdim, interpolation);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto [q_value, q_bdim] = unwrapTensorAtLevel(q, cur_level);
  auto results = batch_rule(self_value, self_bdim, q_value, q_bdim, dim, keepdim, interpolation);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// vmap plumbing for aten::nanquantile.scalar (double q): only `self` can be
// batched; falls through when it is not, otherwise unwraps it and forwards
// the scalar/flag args unchanged to batch_rule.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor nanquantile_scalar_generated_plumbing(const at::Tensor & self, double q, ::std::optional<int64_t> dim, bool keepdim, c10::string_view interpolation) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::nanquantile_scalar::call(self, q, dim, keepdim, interpolation);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, q, dim, keepdim, interpolation);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// vmap plumbing for aten::sort: returns (values, indices). The batch rule
// yields two (value, bdim) pairs; each is re-wrapped at the current level
// before being packed back into the output tuple.
template <typename batch_rule_t, batch_rule_t batch_rule>
::std::tuple<at::Tensor,at::Tensor> sort_generated_plumbing(const at::Tensor & self, int64_t dim, bool descending) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::sort::call(self, dim, descending);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, dim, descending);
  return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
}
// vmap plumbing for aten::sort.stable: same shape as sort plumbing but also
// forwards the optional `stable` flag; re-wraps both outputs of the batch
// rule at the current level.
template <typename batch_rule_t, batch_rule_t batch_rule>
::std::tuple<at::Tensor,at::Tensor> sort_stable_generated_plumbing(const at::Tensor & self, ::std::optional<bool> stable, int64_t dim, bool descending) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::sort_stable::call(self, stable, dim, descending);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, stable, dim, descending);
  return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
}
// vmap plumbing for aten::sort.dimname (at::Dimname dim): falls through when
// `self` is unbatched; otherwise unwraps it and re-wraps both batch-rule
// outputs at the current level.
template <typename batch_rule_t, batch_rule_t batch_rule>
::std::tuple<at::Tensor,at::Tensor> sort_dimname_generated_plumbing(const at::Tensor & self, at::Dimname dim, bool descending) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::sort_dimname::call(self, dim, descending);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, dim, descending);
  return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
}
// vmap plumbing for aten::sort.dimname_stable: Dimname variant that also
// forwards the optional `stable` flag; re-wraps both batch-rule outputs at
// the current level.
template <typename batch_rule_t, batch_rule_t batch_rule>
::std::tuple<at::Tensor,at::Tensor> sort_dimname_stable_generated_plumbing(const at::Tensor & self, ::std::optional<bool> stable, at::Dimname dim, bool descending) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::sort_dimname_stable::call(self, stable, dim, descending);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, stable, dim, descending);
  return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
}
// vmap plumbing for aten::msort (unary): falls through when `self` is
// unbatched at the current level; otherwise unwraps it, runs batch_rule, and
// re-wraps the single result.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor msort_generated_plumbing(const at::Tensor & self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::msort::call(self);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// vmap plumbing for aten::argsort: falls through when `self` is unbatched;
// otherwise unwraps it, forwards dim/descending to batch_rule, and re-wraps
// the result at the current level.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor argsort_generated_plumbing(const at::Tensor & self, int64_t dim, bool descending) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::argsort::call(self, dim, descending);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, dim, descending);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// vmap plumbing for aten::argsort.stable: same as argsort plumbing plus the
// `stable` flag, which is passed through to batch_rule untouched.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor argsort_stable_generated_plumbing(const at::Tensor & self, bool stable, int64_t dim, bool descending) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::argsort_stable::call(self, stable, dim, descending);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, stable, dim, descending);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// vmap plumbing for aten::argsort.dimname (at::Dimname dim): falls through
// when `self` is unbatched; otherwise unwraps it, runs batch_rule, and
// re-wraps the result at the current level.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor argsort_dimname_generated_plumbing(const at::Tensor & self, at::Dimname dim, bool descending) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::argsort_dimname::call(self, dim, descending);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, dim, descending);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// vmap plumbing for aten::topk: returns (values, indices). `k` is a SymInt
// and is forwarded unchanged; both batch-rule outputs are re-wrapped at the
// current level.
template <typename batch_rule_t, batch_rule_t batch_rule>
::std::tuple<at::Tensor,at::Tensor> topk_generated_plumbing(const at::Tensor & self, c10::SymInt k, int64_t dim, bool largest, bool sorted) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::topk::call(self, k, dim, largest, sorted);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, k, dim, largest, sorted);
  return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
}
// vmap plumbing for aten::all (unary reduction over all elements): falls
// through when `self` is unbatched; otherwise unwraps, runs batch_rule, and
// re-wraps the single result at the current level.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor all_generated_plumbing(const at::Tensor & self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::all::call(self);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// vmap plumbing for aten::any (unary reduction over all elements): falls
// through when `self` is unbatched; otherwise unwraps, runs batch_rule, and
// re-wraps the single result at the current level.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor any_generated_plumbing(const at::Tensor & self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::any::call(self);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// vmap plumbing for aten::renorm: falls through when `self` is unbatched;
// otherwise unwraps it, forwards the Scalar/int args unchanged, and re-wraps
// the batch-rule result at the current level.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor renorm_generated_plumbing(const at::Tensor & self, const at::Scalar & p, int64_t dim, const at::Scalar & maxnorm) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::renorm::call(self, p, dim, maxnorm);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, p, dim, maxnorm);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// In-place vmap plumbing for aten::renorm_: invokes batch_rule purely for its
// side effect on the unwrapped value, then returns the original (still
// batched) `self` reference.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor & renorm__generated_plumbing(at::Tensor & self, const at::Scalar & p, int64_t dim, const at::Scalar & maxnorm) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::renorm_::call(self, p, dim, maxnorm);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  batch_rule(self_value, self_bdim, p, dim, maxnorm);
  return self;
}
// vmap plumbing for aten::unfold: falls through when `self` is unbatched;
// otherwise unwraps it, forwards dimension/size/step unchanged, and re-wraps
// the batch-rule result at the current level.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor unfold_generated_plumbing(const at::Tensor & self, int64_t dimension, int64_t size, int64_t step) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::unfold::call(self, dimension, size, step);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, dimension, size, step);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// vmap plumbing for aten::unfold_backward: only `grad_in` can be batched;
// `input_sizes` (SymInt array) and the int args pass through unchanged to
// batch_rule; the result is re-wrapped at the current level.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor unfold_backward_generated_plumbing(const at::Tensor & grad_in, c10::SymIntArrayRef input_sizes, int64_t dim, int64_t size, int64_t step) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(grad_in, cur_level)) {
    return at::_ops::unfold_backward::call(grad_in, input_sizes, dim, size, step);
  }
  auto [grad_in_value, grad_in_bdim] = unwrapTensorAtLevel(grad_in, cur_level);
  auto results = batch_rule(grad_in_value, grad_in_bdim, input_sizes, dim, size, step);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// vmap plumbing for aten::pow.Tensor_Tensor: falls through when neither input
// is batched at the current level; otherwise unwraps both (unwrap is a no-op
// for whichever side is unbatched), runs batch_rule, and re-wraps the result.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor pow_Tensor_Tensor_generated_plumbing(const at::Tensor & self, const at::Tensor & exponent) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(exponent, cur_level)) {
    return at::_ops::pow_Tensor_Tensor::call(self, exponent);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto [exponent_value, exponent_bdim] = unwrapTensorAtLevel(exponent, cur_level);
  auto results = batch_rule(self_value, self_bdim, exponent_value, exponent_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// vmap plumbing for aten::pow.Scalar (Scalar base, Tensor exponent): only
// `exponent` can be batched; the Scalar `self` is passed straight through to
// batch_rule.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor pow_Scalar_generated_plumbing(const at::Scalar & self, const at::Tensor & exponent) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(exponent, cur_level)) {
    return at::_ops::pow_Scalar::call(self, exponent);
  }
  auto [exponent_value, exponent_bdim] = unwrapTensorAtLevel(exponent, cur_level);
  auto results = batch_rule(self, exponent_value, exponent_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// vmap plumbing for aten::pow.Tensor_Scalar: only `self` can be batched; the
// Scalar `exponent` passes through unchanged to batch_rule.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor pow_Tensor_Scalar_generated_plumbing(const at::Tensor & self, const at::Scalar & exponent) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::pow_Tensor_Scalar::call(self, exponent);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, exponent);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// In-place vmap plumbing for aten::pow_.Scalar: invokes batch_rule for its
// side effect on the unwrapped `self`, then returns the original reference.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor & pow__Scalar_generated_plumbing(at::Tensor & self, const at::Scalar & exponent) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::pow__Scalar::call(self, exponent);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  batch_rule(self_value, self_bdim, exponent);
  return self;
}
// In-place vmap plumbing for aten::pow_.Tensor: unwraps both operands,
// invokes batch_rule for its side effect on `self`'s unwrapped value, and
// returns the original reference.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor & pow__Tensor_generated_plumbing(at::Tensor & self, const at::Tensor & exponent) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(exponent, cur_level)) {
    return at::_ops::pow__Tensor::call(self, exponent);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto [exponent_value, exponent_bdim] = unwrapTensorAtLevel(exponent, cur_level);
  batch_rule(self_value, self_bdim, exponent_value, exponent_bdim);
  return self;
}
// vmap plumbing for aten::float_power.Tensor_Tensor: falls through when
// neither input is batched at the current level; otherwise unwraps both,
// runs batch_rule, and re-wraps the result.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor float_power_Tensor_Tensor_generated_plumbing(const at::Tensor & self, const at::Tensor & exponent) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(exponent, cur_level)) {
    return at::_ops::float_power_Tensor_Tensor::call(self, exponent);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto [exponent_value, exponent_bdim] = unwrapTensorAtLevel(exponent, cur_level);
  auto results = batch_rule(self_value, self_bdim, exponent_value, exponent_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// vmap plumbing for aten::float_power.Scalar (Scalar base, Tensor exponent):
// only `exponent` can be batched; the Scalar `self` passes straight through.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor float_power_Scalar_generated_plumbing(const at::Scalar & self, const at::Tensor & exponent) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(exponent, cur_level)) {
    return at::_ops::float_power_Scalar::call(self, exponent);
  }
  auto [exponent_value, exponent_bdim] = unwrapTensorAtLevel(exponent, cur_level);
  auto results = batch_rule(self, exponent_value, exponent_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// vmap plumbing for aten::float_power.Tensor_Scalar: only `self` can be
// batched; the Scalar `exponent` passes through unchanged to batch_rule.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor float_power_Tensor_Scalar_generated_plumbing(const at::Tensor & self, const at::Scalar & exponent) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::float_power_Tensor_Scalar::call(self, exponent);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, exponent);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// In-place vmap plumbing for aten::float_power_.Scalar: invokes batch_rule
// for its side effect on the unwrapped `self`, then returns the original
// reference.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor & float_power__Scalar_generated_plumbing(at::Tensor & self, const at::Scalar & exponent) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::float_power__Scalar::call(self, exponent);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  batch_rule(self_value, self_bdim, exponent);
  return self;
}
// In-place vmap plumbing for aten::float_power_.Tensor: unwraps both
// operands, invokes batch_rule for its side effect on `self`'s unwrapped
// value, and returns the original reference.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor & float_power__Tensor_generated_plumbing(at::Tensor & self, const at::Tensor & exponent) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(exponent, cur_level)) {
    return at::_ops::float_power__Tensor::call(self, exponent);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto [exponent_value, exponent_bdim] = unwrapTensorAtLevel(exponent, cur_level);
  batch_rule(self_value, self_bdim, exponent_value, exponent_bdim);
  return self;
}
// In-place vmap plumbing for aten::normal_ (fill with samples from
// N(mean, std)): the optional generator passes through unchanged; batch_rule
// is invoked for its side effect and the original `self` is returned.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor & normal__generated_plumbing(at::Tensor & self, double mean, double std, ::std::optional<at::Generator> generator) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::normal_::call(self, mean, std, generator);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  batch_rule(self_value, self_bdim, mean, std, generator);
  return self;
}
// vmap plumbing for aten::normal_functional (out-of-place counterpart of
// normal_): falls through when `self` is unbatched; otherwise unwraps it and
// re-wraps the batch-rule result at the current level.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor normal_functional_generated_plumbing(const at::Tensor & self, double mean, double std, ::std::optional<at::Generator> generator) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::normal_functional::call(self, mean, std, generator);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, mean, std, generator);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// vmap plumbing for aten::normal.Tensor_float (Tensor mean, double std): only
// `mean` can be batched; std and the optional generator pass through
// unchanged to batch_rule.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor normal_Tensor_float_generated_plumbing(const at::Tensor & mean, double std, ::std::optional<at::Generator> generator) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(mean, cur_level)) {
    return at::_ops::normal_Tensor_float::call(mean, std, generator);
  }
  auto [mean_value, mean_bdim] = unwrapTensorAtLevel(mean, cur_level);
  auto results = batch_rule(mean_value, mean_bdim, std, generator);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// vmap plumbing for aten::normal.float_Tensor (double mean, Tensor std): only
// `std` can be batched; mean and the optional generator pass through
// unchanged to batch_rule.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor normal_float_Tensor_generated_plumbing(double mean, const at::Tensor & std, ::std::optional<at::Generator> generator) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(std, cur_level)) {
    return at::_ops::normal_float_Tensor::call(mean, std, generator);
  }
  auto [std_value, std_bdim] = unwrapTensorAtLevel(std, cur_level);
  auto results = batch_rule(mean, std_value, std_bdim, generator);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// vmap plumbing for aten::normal.Tensor_Tensor: falls through when neither
// `mean` nor `std` is batched at the current level; otherwise unwraps both
// tensors, runs batch_rule, and re-wraps the result.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor normal_Tensor_Tensor_generated_plumbing(const at::Tensor & mean, const at::Tensor & std, ::std::optional<at::Generator> generator) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(mean, cur_level) && !isBatchedAtLevel(std, cur_level)) {
    return at::_ops::normal_Tensor_Tensor::call(mean, std, generator);
  }
  auto [mean_value, mean_bdim] = unwrapTensorAtLevel(mean, cur_level);
  auto [std_value, std_bdim] = unwrapTensorAtLevel(std, cur_level);
  auto results = batch_rule(mean_value, mean_bdim, std_value, std_bdim, generator);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// vmap plumbing for aten::alias (unary view op): falls through when `self` is
// unbatched; otherwise unwraps it, runs batch_rule, and re-wraps the result
// at the current level.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor alias_generated_plumbing(const at::Tensor & self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::alias::call(self);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// Void vmap plumbing for aten::_amp_foreach_non_finite_check_and_unscale_:
// the TensorList `self` is passed to batch_rule as-is (not unwrapped here),
// while `found_inf` and `inv_scale` are unwrapped; batch_rule is called only
// for its in-place side effects, so nothing is returned or re-wrapped.
template <typename batch_rule_t, batch_rule_t batch_rule>
void _amp_foreach_non_finite_check_and_unscale__generated_plumbing(at::TensorList self, at::Tensor & found_inf, const at::Tensor & inv_scale) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(found_inf, cur_level) && !isBatchedAtLevel(inv_scale, cur_level)) {
    return at::_ops::_amp_foreach_non_finite_check_and_unscale_::call(self, found_inf, inv_scale);
  }
  auto [found_inf_value, found_inf_bdim] = unwrapTensorAtLevel(found_inf, cur_level);
  auto [inv_scale_value, inv_scale_bdim] = unwrapTensorAtLevel(inv_scale, cur_level);
  batch_rule(self, found_inf_value, found_inf_bdim, inv_scale_value, inv_scale_bdim);
}
18443 template <typename batch_rule_t, batch_rule_t batch_rule>
18444 ::std::vector<at::Tensor> _foreach_add_Scalar_generated_plumbing(at::TensorList self, const at::Scalar & scalar) {
18445   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
18446   auto maybe_layer = maybeCurrentDynamicLayer();
18447   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
18448   int64_t cur_level = maybe_layer->layerId();
18449   if (!isBatchedAtLevel(self, cur_level)) {
18450     return at::_ops::_foreach_add_Scalar::call(self, scalar);
18451   }
18452 
18453   auto results = batch_rule(self, scalar);
18454   return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
18455 }
18456 template <typename batch_rule_t, batch_rule_t batch_rule>
18457 void _foreach_add__Scalar_generated_plumbing(at::TensorList self, const at::Scalar & scalar) {
18458   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
18459   auto maybe_layer = maybeCurrentDynamicLayer();
18460   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
18461   int64_t cur_level = maybe_layer->layerId();
18462   if (!isBatchedAtLevel(self, cur_level)) {
18463     return at::_ops::_foreach_add__Scalar::call(self, scalar);
18464   }
18465 
18466   batch_rule(self, scalar);
18467 }
18468 template <typename batch_rule_t, batch_rule_t batch_rule>
18469 ::std::vector<at::Tensor> _foreach_add_List_generated_plumbing(at::TensorList self, at::TensorList other, const at::Scalar & alpha) {
18470   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
18471   auto maybe_layer = maybeCurrentDynamicLayer();
18472   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
18473   int64_t cur_level = maybe_layer->layerId();
18474   if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
18475     return at::_ops::_foreach_add_List::call(self, other, alpha);
18476   }
18477 
18478   auto results = batch_rule(self, other, alpha);
18479   return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
18480 }
18481 template <typename batch_rule_t, batch_rule_t batch_rule>
18482 void _foreach_add__List_generated_plumbing(at::TensorList self, at::TensorList other, const at::Scalar & alpha) {
18483   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
18484   auto maybe_layer = maybeCurrentDynamicLayer();
18485   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
18486   int64_t cur_level = maybe_layer->layerId();
18487   if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
18488     return at::_ops::_foreach_add__List::call(self, other, alpha);
18489   }
18490 
18491   batch_rule(self, other, alpha);
18492 }
18493 template <typename batch_rule_t, batch_rule_t batch_rule>
18494 ::std::vector<at::Tensor> _foreach_add_ScalarList_generated_plumbing(at::TensorList self, at::ArrayRef<at::Scalar> scalars) {
18495   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
18496   auto maybe_layer = maybeCurrentDynamicLayer();
18497   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
18498   int64_t cur_level = maybe_layer->layerId();
18499   if (!isBatchedAtLevel(self, cur_level)) {
18500     return at::_ops::_foreach_add_ScalarList::call(self, scalars);
18501   }
18502 
18503   auto results = batch_rule(self, scalars);
18504   return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
18505 }
18506 template <typename batch_rule_t, batch_rule_t batch_rule>
18507 void _foreach_add__ScalarList_generated_plumbing(at::TensorList self, at::ArrayRef<at::Scalar> scalars) {
18508   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
18509   auto maybe_layer = maybeCurrentDynamicLayer();
18510   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
18511   int64_t cur_level = maybe_layer->layerId();
18512   if (!isBatchedAtLevel(self, cur_level)) {
18513     return at::_ops::_foreach_add__ScalarList::call(self, scalars);
18514   }
18515 
18516   batch_rule(self, scalars);
18517 }
18518 template <typename batch_rule_t, batch_rule_t batch_rule>
18519 ::std::vector<at::Tensor> _foreach_add_Tensor_generated_plumbing(at::TensorList self, const at::Tensor & other, const at::Scalar & alpha) {
18520   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
18521   auto maybe_layer = maybeCurrentDynamicLayer();
18522   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
18523   int64_t cur_level = maybe_layer->layerId();
18524   if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
18525     return at::_ops::_foreach_add_Tensor::call(self, other, alpha);
18526   }
18527   auto [other_value, other_bdim] = unwrapTensorAtLevel(other, cur_level);
18528   auto results = batch_rule(self, other_value, other_bdim, alpha);
18529   return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
18530 }
18531 template <typename batch_rule_t, batch_rule_t batch_rule>
18532 void _foreach_add__Tensor_generated_plumbing(at::TensorList self, const at::Tensor & other, const at::Scalar & alpha) {
18533   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
18534   auto maybe_layer = maybeCurrentDynamicLayer();
18535   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
18536   int64_t cur_level = maybe_layer->layerId();
18537   if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
18538     return at::_ops::_foreach_add__Tensor::call(self, other, alpha);
18539   }
18540   auto [other_value, other_bdim] = unwrapTensorAtLevel(other, cur_level);
18541   batch_rule(self, other_value, other_bdim, alpha);
18542 }
18543 template <typename batch_rule_t, batch_rule_t batch_rule>
18544 ::std::vector<at::Tensor> _foreach_sub_Scalar_generated_plumbing(at::TensorList self, const at::Scalar & scalar) {
18545   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
18546   auto maybe_layer = maybeCurrentDynamicLayer();
18547   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
18548   int64_t cur_level = maybe_layer->layerId();
18549   if (!isBatchedAtLevel(self, cur_level)) {
18550     return at::_ops::_foreach_sub_Scalar::call(self, scalar);
18551   }
18552 
18553   auto results = batch_rule(self, scalar);
18554   return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
18555 }
18556 template <typename batch_rule_t, batch_rule_t batch_rule>
18557 void _foreach_sub__Scalar_generated_plumbing(at::TensorList self, const at::Scalar & scalar) {
18558   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
18559   auto maybe_layer = maybeCurrentDynamicLayer();
18560   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
18561   int64_t cur_level = maybe_layer->layerId();
18562   if (!isBatchedAtLevel(self, cur_level)) {
18563     return at::_ops::_foreach_sub__Scalar::call(self, scalar);
18564   }
18565 
18566   batch_rule(self, scalar);
18567 }
18568 template <typename batch_rule_t, batch_rule_t batch_rule>
18569 ::std::vector<at::Tensor> _foreach_sub_List_generated_plumbing(at::TensorList self, at::TensorList other, const at::Scalar & alpha) {
18570   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
18571   auto maybe_layer = maybeCurrentDynamicLayer();
18572   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
18573   int64_t cur_level = maybe_layer->layerId();
18574   if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
18575     return at::_ops::_foreach_sub_List::call(self, other, alpha);
18576   }
18577 
18578   auto results = batch_rule(self, other, alpha);
18579   return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
18580 }
18581 template <typename batch_rule_t, batch_rule_t batch_rule>
18582 void _foreach_sub__List_generated_plumbing(at::TensorList self, at::TensorList other, const at::Scalar & alpha) {
18583   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
18584   auto maybe_layer = maybeCurrentDynamicLayer();
18585   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
18586   int64_t cur_level = maybe_layer->layerId();
18587   if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
18588     return at::_ops::_foreach_sub__List::call(self, other, alpha);
18589   }
18590 
18591   batch_rule(self, other, alpha);
18592 }
18593 template <typename batch_rule_t, batch_rule_t batch_rule>
18594 ::std::vector<at::Tensor> _foreach_sub_ScalarList_generated_plumbing(at::TensorList self, at::ArrayRef<at::Scalar> scalars) {
18595   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
18596   auto maybe_layer = maybeCurrentDynamicLayer();
18597   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
18598   int64_t cur_level = maybe_layer->layerId();
18599   if (!isBatchedAtLevel(self, cur_level)) {
18600     return at::_ops::_foreach_sub_ScalarList::call(self, scalars);
18601   }
18602 
18603   auto results = batch_rule(self, scalars);
18604   return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
18605 }
18606 template <typename batch_rule_t, batch_rule_t batch_rule>
18607 void _foreach_sub__ScalarList_generated_plumbing(at::TensorList self, at::ArrayRef<at::Scalar> scalars) {
18608   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
18609   auto maybe_layer = maybeCurrentDynamicLayer();
18610   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
18611   int64_t cur_level = maybe_layer->layerId();
18612   if (!isBatchedAtLevel(self, cur_level)) {
18613     return at::_ops::_foreach_sub__ScalarList::call(self, scalars);
18614   }
18615 
18616   batch_rule(self, scalars);
18617 }
18618 template <typename batch_rule_t, batch_rule_t batch_rule>
18619 ::std::vector<at::Tensor> _foreach_mul_Scalar_generated_plumbing(at::TensorList self, const at::Scalar & scalar) {
18620   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
18621   auto maybe_layer = maybeCurrentDynamicLayer();
18622   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
18623   int64_t cur_level = maybe_layer->layerId();
18624   if (!isBatchedAtLevel(self, cur_level)) {
18625     return at::_ops::_foreach_mul_Scalar::call(self, scalar);
18626   }
18627 
18628   auto results = batch_rule(self, scalar);
18629   return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
18630 }
18631 template <typename batch_rule_t, batch_rule_t batch_rule>
18632 void _foreach_mul__Scalar_generated_plumbing(at::TensorList self, const at::Scalar & scalar) {
18633   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
18634   auto maybe_layer = maybeCurrentDynamicLayer();
18635   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
18636   int64_t cur_level = maybe_layer->layerId();
18637   if (!isBatchedAtLevel(self, cur_level)) {
18638     return at::_ops::_foreach_mul__Scalar::call(self, scalar);
18639   }
18640 
18641   batch_rule(self, scalar);
18642 }
18643 template <typename batch_rule_t, batch_rule_t batch_rule>
18644 ::std::vector<at::Tensor> _foreach_mul_List_generated_plumbing(at::TensorList self, at::TensorList other) {
18645   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
18646   auto maybe_layer = maybeCurrentDynamicLayer();
18647   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
18648   int64_t cur_level = maybe_layer->layerId();
18649   if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
18650     return at::_ops::_foreach_mul_List::call(self, other);
18651   }
18652 
18653   auto results = batch_rule(self, other);
18654   return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
18655 }
18656 template <typename batch_rule_t, batch_rule_t batch_rule>
18657 void _foreach_mul__List_generated_plumbing(at::TensorList self, at::TensorList other) {
18658   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
18659   auto maybe_layer = maybeCurrentDynamicLayer();
18660   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
18661   int64_t cur_level = maybe_layer->layerId();
18662   if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
18663     return at::_ops::_foreach_mul__List::call(self, other);
18664   }
18665 
18666   batch_rule(self, other);
18667 }
18668 template <typename batch_rule_t, batch_rule_t batch_rule>
18669 ::std::vector<at::Tensor> _foreach_mul_ScalarList_generated_plumbing(at::TensorList self, at::ArrayRef<at::Scalar> scalars) {
18670   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
18671   auto maybe_layer = maybeCurrentDynamicLayer();
18672   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
18673   int64_t cur_level = maybe_layer->layerId();
18674   if (!isBatchedAtLevel(self, cur_level)) {
18675     return at::_ops::_foreach_mul_ScalarList::call(self, scalars);
18676   }
18677 
18678   auto results = batch_rule(self, scalars);
18679   return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
18680 }
18681 template <typename batch_rule_t, batch_rule_t batch_rule>
18682 void _foreach_mul__ScalarList_generated_plumbing(at::TensorList self, at::ArrayRef<at::Scalar> scalars) {
18683   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
18684   auto maybe_layer = maybeCurrentDynamicLayer();
18685   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
18686   int64_t cur_level = maybe_layer->layerId();
18687   if (!isBatchedAtLevel(self, cur_level)) {
18688     return at::_ops::_foreach_mul__ScalarList::call(self, scalars);
18689   }
18690 
18691   batch_rule(self, scalars);
18692 }
18693 template <typename batch_rule_t, batch_rule_t batch_rule>
18694 ::std::vector<at::Tensor> _foreach_mul_Tensor_generated_plumbing(at::TensorList self, const at::Tensor & other) {
18695   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
18696   auto maybe_layer = maybeCurrentDynamicLayer();
18697   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
18698   int64_t cur_level = maybe_layer->layerId();
18699   if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
18700     return at::_ops::_foreach_mul_Tensor::call(self, other);
18701   }
18702   auto [other_value, other_bdim] = unwrapTensorAtLevel(other, cur_level);
18703   auto results = batch_rule(self, other_value, other_bdim);
18704   return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
18705 }
18706 template <typename batch_rule_t, batch_rule_t batch_rule>
18707 void _foreach_mul__Tensor_generated_plumbing(at::TensorList self, const at::Tensor & other) {
18708   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
18709   auto maybe_layer = maybeCurrentDynamicLayer();
18710   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
18711   int64_t cur_level = maybe_layer->layerId();
18712   if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
18713     return at::_ops::_foreach_mul__Tensor::call(self, other);
18714   }
18715   auto [other_value, other_bdim] = unwrapTensorAtLevel(other, cur_level);
18716   batch_rule(self, other_value, other_bdim);
18717 }
18718 template <typename batch_rule_t, batch_rule_t batch_rule>
18719 ::std::vector<at::Tensor> _foreach_div_Scalar_generated_plumbing(at::TensorList self, const at::Scalar & scalar) {
18720   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
18721   auto maybe_layer = maybeCurrentDynamicLayer();
18722   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
18723   int64_t cur_level = maybe_layer->layerId();
18724   if (!isBatchedAtLevel(self, cur_level)) {
18725     return at::_ops::_foreach_div_Scalar::call(self, scalar);
18726   }
18727 
18728   auto results = batch_rule(self, scalar);
18729   return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
18730 }
18731 template <typename batch_rule_t, batch_rule_t batch_rule>
18732 void _foreach_div__Scalar_generated_plumbing(at::TensorList self, const at::Scalar & scalar) {
18733   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
18734   auto maybe_layer = maybeCurrentDynamicLayer();
18735   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
18736   int64_t cur_level = maybe_layer->layerId();
18737   if (!isBatchedAtLevel(self, cur_level)) {
18738     return at::_ops::_foreach_div__Scalar::call(self, scalar);
18739   }
18740 
18741   batch_rule(self, scalar);
18742 }
18743 template <typename batch_rule_t, batch_rule_t batch_rule>
18744 ::std::vector<at::Tensor> _foreach_div_List_generated_plumbing(at::TensorList self, at::TensorList other) {
18745   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
18746   auto maybe_layer = maybeCurrentDynamicLayer();
18747   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
18748   int64_t cur_level = maybe_layer->layerId();
18749   if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
18750     return at::_ops::_foreach_div_List::call(self, other);
18751   }
18752 
18753   auto results = batch_rule(self, other);
18754   return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
18755 }
18756 template <typename batch_rule_t, batch_rule_t batch_rule>
18757 void _foreach_div__List_generated_plumbing(at::TensorList self, at::TensorList other) {
18758   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
18759   auto maybe_layer = maybeCurrentDynamicLayer();
18760   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
18761   int64_t cur_level = maybe_layer->layerId();
18762   if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
18763     return at::_ops::_foreach_div__List::call(self, other);
18764   }
18765 
18766   batch_rule(self, other);
18767 }
18768 template <typename batch_rule_t, batch_rule_t batch_rule>
18769 ::std::vector<at::Tensor> _foreach_div_ScalarList_generated_plumbing(at::TensorList self, at::ArrayRef<at::Scalar> scalars) {
18770   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
18771   auto maybe_layer = maybeCurrentDynamicLayer();
18772   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
18773   int64_t cur_level = maybe_layer->layerId();
18774   if (!isBatchedAtLevel(self, cur_level)) {
18775     return at::_ops::_foreach_div_ScalarList::call(self, scalars);
18776   }
18777 
18778   auto results = batch_rule(self, scalars);
18779   return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
18780 }
18781 template <typename batch_rule_t, batch_rule_t batch_rule>
18782 void _foreach_div__ScalarList_generated_plumbing(at::TensorList self, at::ArrayRef<at::Scalar> scalars) {
18783   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
18784   auto maybe_layer = maybeCurrentDynamicLayer();
18785   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
18786   int64_t cur_level = maybe_layer->layerId();
18787   if (!isBatchedAtLevel(self, cur_level)) {
18788     return at::_ops::_foreach_div__ScalarList::call(self, scalars);
18789   }
18790 
18791   batch_rule(self, scalars);
18792 }
18793 template <typename batch_rule_t, batch_rule_t batch_rule>
18794 ::std::vector<at::Tensor> _foreach_div_Tensor_generated_plumbing(at::TensorList self, const at::Tensor & other) {
18795   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
18796   auto maybe_layer = maybeCurrentDynamicLayer();
18797   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
18798   int64_t cur_level = maybe_layer->layerId();
18799   if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
18800     return at::_ops::_foreach_div_Tensor::call(self, other);
18801   }
18802   auto [other_value, other_bdim] = unwrapTensorAtLevel(other, cur_level);
18803   auto results = batch_rule(self, other_value, other_bdim);
18804   return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
18805 }
18806 template <typename batch_rule_t, batch_rule_t batch_rule>
18807 void _foreach_div__Tensor_generated_plumbing(at::TensorList self, const at::Tensor & other) {
18808   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
18809   auto maybe_layer = maybeCurrentDynamicLayer();
18810   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
18811   int64_t cur_level = maybe_layer->layerId();
18812   if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
18813     return at::_ops::_foreach_div__Tensor::call(self, other);
18814   }
18815   auto [other_value, other_bdim] = unwrapTensorAtLevel(other, cur_level);
18816   batch_rule(self, other_value, other_bdim);
18817 }
18818 template <typename batch_rule_t, batch_rule_t batch_rule>
18819 ::std::vector<at::Tensor> _foreach_clamp_max_Scalar_generated_plumbing(at::TensorList self, const at::Scalar & scalar) {
18820   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
18821   auto maybe_layer = maybeCurrentDynamicLayer();
18822   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
18823   int64_t cur_level = maybe_layer->layerId();
18824   if (!isBatchedAtLevel(self, cur_level)) {
18825     return at::_ops::_foreach_clamp_max_Scalar::call(self, scalar);
18826   }
18827 
18828   auto results = batch_rule(self, scalar);
18829   return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
18830 }
18831 template <typename batch_rule_t, batch_rule_t batch_rule>
18832 void _foreach_clamp_max__Scalar_generated_plumbing(at::TensorList self, const at::Scalar & scalar) {
18833   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
18834   auto maybe_layer = maybeCurrentDynamicLayer();
18835   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
18836   int64_t cur_level = maybe_layer->layerId();
18837   if (!isBatchedAtLevel(self, cur_level)) {
18838     return at::_ops::_foreach_clamp_max__Scalar::call(self, scalar);
18839   }
18840 
18841   batch_rule(self, scalar);
18842 }
18843 template <typename batch_rule_t, batch_rule_t batch_rule>
18844 ::std::vector<at::Tensor> _foreach_clamp_max_List_generated_plumbing(at::TensorList self, at::TensorList other) {
18845   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
18846   auto maybe_layer = maybeCurrentDynamicLayer();
18847   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
18848   int64_t cur_level = maybe_layer->layerId();
18849   if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
18850     return at::_ops::_foreach_clamp_max_List::call(self, other);
18851   }
18852 
18853   auto results = batch_rule(self, other);
18854   return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
18855 }
18856 template <typename batch_rule_t, batch_rule_t batch_rule>
18857 void _foreach_clamp_max__List_generated_plumbing(at::TensorList self, at::TensorList other) {
18858   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
18859   auto maybe_layer = maybeCurrentDynamicLayer();
18860   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
18861   int64_t cur_level = maybe_layer->layerId();
18862   if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
18863     return at::_ops::_foreach_clamp_max__List::call(self, other);
18864   }
18865 
18866   batch_rule(self, other);
18867 }
18868 template <typename batch_rule_t, batch_rule_t batch_rule>
18869 ::std::vector<at::Tensor> _foreach_clamp_max_ScalarList_generated_plumbing(at::TensorList self, at::ArrayRef<at::Scalar> scalars) {
18870   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
18871   auto maybe_layer = maybeCurrentDynamicLayer();
18872   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
18873   int64_t cur_level = maybe_layer->layerId();
18874   if (!isBatchedAtLevel(self, cur_level)) {
18875     return at::_ops::_foreach_clamp_max_ScalarList::call(self, scalars);
18876   }
18877 
18878   auto results = batch_rule(self, scalars);
18879   return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
18880 }
18881 template <typename batch_rule_t, batch_rule_t batch_rule>
18882 void _foreach_clamp_max__ScalarList_generated_plumbing(at::TensorList self, at::ArrayRef<at::Scalar> scalars) {
18883   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
18884   auto maybe_layer = maybeCurrentDynamicLayer();
18885   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
18886   int64_t cur_level = maybe_layer->layerId();
18887   if (!isBatchedAtLevel(self, cur_level)) {
18888     return at::_ops::_foreach_clamp_max__ScalarList::call(self, scalars);
18889   }
18890 
18891   batch_rule(self, scalars);
18892 }
18893 template <typename batch_rule_t, batch_rule_t batch_rule>
18894 ::std::vector<at::Tensor> _foreach_clamp_min_Scalar_generated_plumbing(at::TensorList self, const at::Scalar & scalar) {
18895   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
18896   auto maybe_layer = maybeCurrentDynamicLayer();
18897   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
18898   int64_t cur_level = maybe_layer->layerId();
18899   if (!isBatchedAtLevel(self, cur_level)) {
18900     return at::_ops::_foreach_clamp_min_Scalar::call(self, scalar);
18901   }
18902 
18903   auto results = batch_rule(self, scalar);
18904   return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
18905 }
18906 template <typename batch_rule_t, batch_rule_t batch_rule>
18907 void _foreach_clamp_min__Scalar_generated_plumbing(at::TensorList self, const at::Scalar & scalar) {
18908   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
18909   auto maybe_layer = maybeCurrentDynamicLayer();
18910   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
18911   int64_t cur_level = maybe_layer->layerId();
18912   if (!isBatchedAtLevel(self, cur_level)) {
18913     return at::_ops::_foreach_clamp_min__Scalar::call(self, scalar);
18914   }
18915 
18916   batch_rule(self, scalar);
18917 }
18918 template <typename batch_rule_t, batch_rule_t batch_rule>
18919 ::std::vector<at::Tensor> _foreach_clamp_min_List_generated_plumbing(at::TensorList self, at::TensorList other) {
18920   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
18921   auto maybe_layer = maybeCurrentDynamicLayer();
18922   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
18923   int64_t cur_level = maybe_layer->layerId();
18924   if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
18925     return at::_ops::_foreach_clamp_min_List::call(self, other);
18926   }
18927 
18928   auto results = batch_rule(self, other);
18929   return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
18930 }
18931 template <typename batch_rule_t, batch_rule_t batch_rule>
18932 void _foreach_clamp_min__List_generated_plumbing(at::TensorList self, at::TensorList other) {
18933   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
18934   auto maybe_layer = maybeCurrentDynamicLayer();
18935   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
18936   int64_t cur_level = maybe_layer->layerId();
18937   if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
18938     return at::_ops::_foreach_clamp_min__List::call(self, other);
18939   }
18940 
18941   batch_rule(self, other);
18942 }
18943 template <typename batch_rule_t, batch_rule_t batch_rule>
18944 ::std::vector<at::Tensor> _foreach_clamp_min_ScalarList_generated_plumbing(at::TensorList self, at::ArrayRef<at::Scalar> scalars) {
18945   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
18946   auto maybe_layer = maybeCurrentDynamicLayer();
18947   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
18948   int64_t cur_level = maybe_layer->layerId();
18949   if (!isBatchedAtLevel(self, cur_level)) {
18950     return at::_ops::_foreach_clamp_min_ScalarList::call(self, scalars);
18951   }
18952 
18953   auto results = batch_rule(self, scalars);
18954   return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
18955 }
18956 template <typename batch_rule_t, batch_rule_t batch_rule>
18957 void _foreach_clamp_min__ScalarList_generated_plumbing(at::TensorList self, at::ArrayRef<at::Scalar> scalars) {
18958   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
18959   auto maybe_layer = maybeCurrentDynamicLayer();
18960   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
18961   int64_t cur_level = maybe_layer->layerId();
18962   if (!isBatchedAtLevel(self, cur_level)) {
18963     return at::_ops::_foreach_clamp_min__ScalarList::call(self, scalars);
18964   }
18965 
18966   batch_rule(self, scalars);
18967 }
18968 template <typename batch_rule_t, batch_rule_t batch_rule>
18969 ::std::vector<at::Tensor> _foreach_maximum_Scalar_generated_plumbing(at::TensorList self, const at::Scalar & scalar) {
18970   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
18971   auto maybe_layer = maybeCurrentDynamicLayer();
18972   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
18973   int64_t cur_level = maybe_layer->layerId();
18974   if (!isBatchedAtLevel(self, cur_level)) {
18975     return at::_ops::_foreach_maximum_Scalar::call(self, scalar);
18976   }
18977 
18978   auto results = batch_rule(self, scalar);
18979   return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
18980 }
18981 template <typename batch_rule_t, batch_rule_t batch_rule>
18982 void _foreach_maximum__Scalar_generated_plumbing(at::TensorList self, const at::Scalar & scalar) {
18983   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
18984   auto maybe_layer = maybeCurrentDynamicLayer();
18985   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
18986   int64_t cur_level = maybe_layer->layerId();
18987   if (!isBatchedAtLevel(self, cur_level)) {
18988     return at::_ops::_foreach_maximum__Scalar::call(self, scalar);
18989   }
18990 
18991   batch_rule(self, scalar);
18992 }
// vmap plumbing for aten::_foreach_maximum.List: falls through to the op when neither
// tensor list is batched at the current level; otherwise runs the batch rule and
// rewraps the results as batched tensors.
template <typename batch_rule_t, batch_rule_t batch_rule>
::std::vector<at::Tensor> _foreach_maximum_List_generated_plumbing(at::TensorList self, at::TensorList other) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
    return at::_ops::_foreach_maximum_List::call(self, other);
  }

  auto results = batch_rule(self, other);
  return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
}
// vmap plumbing for in-place aten::_foreach_maximum_.List: mutates `self` through the
// batch rule when either tensor list is batched at the current level.
template <typename batch_rule_t, batch_rule_t batch_rule>
void _foreach_maximum__List_generated_plumbing(at::TensorList self, at::TensorList other) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
    return at::_ops::_foreach_maximum__List::call(self, other);
  }

  batch_rule(self, other);
}
// vmap plumbing for aten::_foreach_maximum.ScalarList: calls the op directly when
// `self` is not batched at the current level; otherwise runs the batch rule and
// rewraps the results.
template <typename batch_rule_t, batch_rule_t batch_rule>
::std::vector<at::Tensor> _foreach_maximum_ScalarList_generated_plumbing(at::TensorList self, at::ArrayRef<at::Scalar> scalars) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::_foreach_maximum_ScalarList::call(self, scalars);
  }

  auto results = batch_rule(self, scalars);
  return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
}
// vmap plumbing for in-place aten::_foreach_maximum_.ScalarList: mutates `self`
// through the batch rule when batched at the current level; no outputs to rewrap.
template <typename batch_rule_t, batch_rule_t batch_rule>
void _foreach_maximum__ScalarList_generated_plumbing(at::TensorList self, at::ArrayRef<at::Scalar> scalars) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::_foreach_maximum__ScalarList::call(self, scalars);
  }

  batch_rule(self, scalars);
}
// vmap plumbing for aten::_foreach_minimum.Scalar: calls the op directly when `self`
// is not batched at the current level; otherwise runs the batch rule and rewraps.
template <typename batch_rule_t, batch_rule_t batch_rule>
::std::vector<at::Tensor> _foreach_minimum_Scalar_generated_plumbing(at::TensorList self, const at::Scalar & scalar) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::_foreach_minimum_Scalar::call(self, scalar);
  }

  auto results = batch_rule(self, scalar);
  return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
}
// vmap plumbing for in-place aten::_foreach_minimum_.Scalar: mutates `self` through
// the batch rule when batched at the current level; no outputs to rewrap.
template <typename batch_rule_t, batch_rule_t batch_rule>
void _foreach_minimum__Scalar_generated_plumbing(at::TensorList self, const at::Scalar & scalar) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::_foreach_minimum__Scalar::call(self, scalar);
  }

  batch_rule(self, scalar);
}
// vmap plumbing for aten::_foreach_minimum.List: falls through to the op when neither
// tensor list is batched at the current level; otherwise runs the batch rule and
// rewraps the results.
template <typename batch_rule_t, batch_rule_t batch_rule>
::std::vector<at::Tensor> _foreach_minimum_List_generated_plumbing(at::TensorList self, at::TensorList other) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
    return at::_ops::_foreach_minimum_List::call(self, other);
  }

  auto results = batch_rule(self, other);
  return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
}
// vmap plumbing for in-place aten::_foreach_minimum_.List: mutates `self` through the
// batch rule when either tensor list is batched at the current level.
template <typename batch_rule_t, batch_rule_t batch_rule>
void _foreach_minimum__List_generated_plumbing(at::TensorList self, at::TensorList other) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
    return at::_ops::_foreach_minimum__List::call(self, other);
  }

  batch_rule(self, other);
}
// vmap plumbing for aten::_foreach_minimum.ScalarList: calls the op directly when
// `self` is not batched at the current level; otherwise runs the batch rule and
// rewraps the results.
template <typename batch_rule_t, batch_rule_t batch_rule>
::std::vector<at::Tensor> _foreach_minimum_ScalarList_generated_plumbing(at::TensorList self, at::ArrayRef<at::Scalar> scalars) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::_foreach_minimum_ScalarList::call(self, scalars);
  }

  auto results = batch_rule(self, scalars);
  return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
}
// vmap plumbing for in-place aten::_foreach_minimum_.ScalarList: mutates `self`
// through the batch rule when batched at the current level; no outputs to rewrap.
template <typename batch_rule_t, batch_rule_t batch_rule>
void _foreach_minimum__ScalarList_generated_plumbing(at::TensorList self, at::ArrayRef<at::Scalar> scalars) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::_foreach_minimum__ScalarList::call(self, scalars);
  }

  batch_rule(self, scalars);
}
// vmap plumbing for aten::_foreach_addcdiv.Scalar: falls through to the op when none
// of the three tensor lists is batched at the current level; otherwise runs the batch
// rule and rewraps the results.
template <typename batch_rule_t, batch_rule_t batch_rule>
::std::vector<at::Tensor> _foreach_addcdiv_Scalar_generated_plumbing(at::TensorList self, at::TensorList tensor1, at::TensorList tensor2, const at::Scalar & value) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(tensor1, cur_level) && !isBatchedAtLevel(tensor2, cur_level)) {
    return at::_ops::_foreach_addcdiv_Scalar::call(self, tensor1, tensor2, value);
  }

  auto results = batch_rule(self, tensor1, tensor2, value);
  return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
}
// vmap plumbing for aten::_foreach_addcdiv.ScalarList: falls through to the op when
// none of the three tensor lists is batched at the current level; otherwise runs the
// batch rule and rewraps the results.
template <typename batch_rule_t, batch_rule_t batch_rule>
::std::vector<at::Tensor> _foreach_addcdiv_ScalarList_generated_plumbing(at::TensorList self, at::TensorList tensor1, at::TensorList tensor2, at::ArrayRef<at::Scalar> scalars) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(tensor1, cur_level) && !isBatchedAtLevel(tensor2, cur_level)) {
    return at::_ops::_foreach_addcdiv_ScalarList::call(self, tensor1, tensor2, scalars);
  }

  auto results = batch_rule(self, tensor1, tensor2, scalars);
  return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
}
// vmap plumbing for aten::_foreach_addcdiv.Tensor: here `scalars` is a Tensor, so it
// participates in the batched check and must be unwrapped to (value, bdim) before the
// batch rule is invoked.
template <typename batch_rule_t, batch_rule_t batch_rule>
::std::vector<at::Tensor> _foreach_addcdiv_Tensor_generated_plumbing(at::TensorList self, at::TensorList tensor1, at::TensorList tensor2, const at::Tensor & scalars) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(tensor1, cur_level) && !isBatchedAtLevel(tensor2, cur_level) && !isBatchedAtLevel(scalars, cur_level)) {
    return at::_ops::_foreach_addcdiv_Tensor::call(self, tensor1, tensor2, scalars);
  }
  // Split the scalars tensor into its underlying value and optional batch dim.
  auto [scalars_value, scalars_bdim] = unwrapTensorAtLevel(scalars, cur_level);
  auto results = batch_rule(self, tensor1, tensor2, scalars_value, scalars_bdim);
  return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
}
// vmap plumbing for in-place aten::_foreach_addcdiv_.Scalar: mutates `self` through
// the batch rule when any tensor list is batched at the current level.
template <typename batch_rule_t, batch_rule_t batch_rule>
void _foreach_addcdiv__Scalar_generated_plumbing(at::TensorList self, at::TensorList tensor1, at::TensorList tensor2, const at::Scalar & value) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(tensor1, cur_level) && !isBatchedAtLevel(tensor2, cur_level)) {
    return at::_ops::_foreach_addcdiv__Scalar::call(self, tensor1, tensor2, value);
  }

  batch_rule(self, tensor1, tensor2, value);
}
// vmap plumbing for in-place aten::_foreach_addcdiv_.ScalarList: mutates `self`
// through the batch rule when any tensor list is batched at the current level.
template <typename batch_rule_t, batch_rule_t batch_rule>
void _foreach_addcdiv__ScalarList_generated_plumbing(at::TensorList self, at::TensorList tensor1, at::TensorList tensor2, at::ArrayRef<at::Scalar> scalars) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(tensor1, cur_level) && !isBatchedAtLevel(tensor2, cur_level)) {
    return at::_ops::_foreach_addcdiv__ScalarList::call(self, tensor1, tensor2, scalars);
  }

  batch_rule(self, tensor1, tensor2, scalars);
}
// vmap plumbing for in-place aten::_foreach_addcdiv_.Tensor: `scalars` is a Tensor,
// so it is included in the batched check and unwrapped to (value, bdim) before the
// batch rule mutates `self`.
template <typename batch_rule_t, batch_rule_t batch_rule>
void _foreach_addcdiv__Tensor_generated_plumbing(at::TensorList self, at::TensorList tensor1, at::TensorList tensor2, const at::Tensor & scalars) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(tensor1, cur_level) && !isBatchedAtLevel(tensor2, cur_level) && !isBatchedAtLevel(scalars, cur_level)) {
    return at::_ops::_foreach_addcdiv__Tensor::call(self, tensor1, tensor2, scalars);
  }
  // Split the scalars tensor into its underlying value and optional batch dim.
  auto [scalars_value, scalars_bdim] = unwrapTensorAtLevel(scalars, cur_level);
  batch_rule(self, tensor1, tensor2, scalars_value, scalars_bdim);
}
// vmap plumbing for aten::_foreach_addcmul.Scalar: falls through to the op when none
// of the three tensor lists is batched at the current level; otherwise runs the batch
// rule and rewraps the results.
template <typename batch_rule_t, batch_rule_t batch_rule>
::std::vector<at::Tensor> _foreach_addcmul_Scalar_generated_plumbing(at::TensorList self, at::TensorList tensor1, at::TensorList tensor2, const at::Scalar & value) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(tensor1, cur_level) && !isBatchedAtLevel(tensor2, cur_level)) {
    return at::_ops::_foreach_addcmul_Scalar::call(self, tensor1, tensor2, value);
  }

  auto results = batch_rule(self, tensor1, tensor2, value);
  return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
}
// vmap plumbing for aten::_foreach_addcmul.ScalarList: falls through to the op when
// none of the three tensor lists is batched at the current level; otherwise runs the
// batch rule and rewraps the results.
template <typename batch_rule_t, batch_rule_t batch_rule>
::std::vector<at::Tensor> _foreach_addcmul_ScalarList_generated_plumbing(at::TensorList self, at::TensorList tensor1, at::TensorList tensor2, at::ArrayRef<at::Scalar> scalars) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(tensor1, cur_level) && !isBatchedAtLevel(tensor2, cur_level)) {
    return at::_ops::_foreach_addcmul_ScalarList::call(self, tensor1, tensor2, scalars);
  }

  auto results = batch_rule(self, tensor1, tensor2, scalars);
  return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
}
// vmap plumbing for aten::_foreach_addcmul.Tensor: here `scalars` is a Tensor, so it
// participates in the batched check and must be unwrapped to (value, bdim) before the
// batch rule is invoked.
template <typename batch_rule_t, batch_rule_t batch_rule>
::std::vector<at::Tensor> _foreach_addcmul_Tensor_generated_plumbing(at::TensorList self, at::TensorList tensor1, at::TensorList tensor2, const at::Tensor & scalars) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(tensor1, cur_level) && !isBatchedAtLevel(tensor2, cur_level) && !isBatchedAtLevel(scalars, cur_level)) {
    return at::_ops::_foreach_addcmul_Tensor::call(self, tensor1, tensor2, scalars);
  }
  // Split the scalars tensor into its underlying value and optional batch dim.
  auto [scalars_value, scalars_bdim] = unwrapTensorAtLevel(scalars, cur_level);
  auto results = batch_rule(self, tensor1, tensor2, scalars_value, scalars_bdim);
  return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
}
// vmap plumbing for in-place aten::_foreach_addcmul_.Scalar: mutates `self` through
// the batch rule when any tensor list is batched at the current level.
template <typename batch_rule_t, batch_rule_t batch_rule>
void _foreach_addcmul__Scalar_generated_plumbing(at::TensorList self, at::TensorList tensor1, at::TensorList tensor2, const at::Scalar & value) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(tensor1, cur_level) && !isBatchedAtLevel(tensor2, cur_level)) {
    return at::_ops::_foreach_addcmul__Scalar::call(self, tensor1, tensor2, value);
  }

  batch_rule(self, tensor1, tensor2, value);
}
// vmap plumbing for in-place aten::_foreach_addcmul_.ScalarList: mutates `self`
// through the batch rule when any tensor list is batched at the current level.
template <typename batch_rule_t, batch_rule_t batch_rule>
void _foreach_addcmul__ScalarList_generated_plumbing(at::TensorList self, at::TensorList tensor1, at::TensorList tensor2, at::ArrayRef<at::Scalar> scalars) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(tensor1, cur_level) && !isBatchedAtLevel(tensor2, cur_level)) {
    return at::_ops::_foreach_addcmul__ScalarList::call(self, tensor1, tensor2, scalars);
  }

  batch_rule(self, tensor1, tensor2, scalars);
}
// vmap plumbing for in-place aten::_foreach_addcmul_.Tensor: `scalars` is a Tensor,
// so it is included in the batched check and unwrapped to (value, bdim) before the
// batch rule mutates `self`.
template <typename batch_rule_t, batch_rule_t batch_rule>
void _foreach_addcmul__Tensor_generated_plumbing(at::TensorList self, at::TensorList tensor1, at::TensorList tensor2, const at::Tensor & scalars) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(tensor1, cur_level) && !isBatchedAtLevel(tensor2, cur_level) && !isBatchedAtLevel(scalars, cur_level)) {
    return at::_ops::_foreach_addcmul__Tensor::call(self, tensor1, tensor2, scalars);
  }
  // Split the scalars tensor into its underlying value and optional batch dim.
  auto [scalars_value, scalars_bdim] = unwrapTensorAtLevel(scalars, cur_level);
  batch_rule(self, tensor1, tensor2, scalars_value, scalars_bdim);
}
// vmap plumbing for aten::_foreach_abs: unary foreach op; calls the op directly when
// `self` is not batched at the current level, else runs the batch rule and rewraps.
template <typename batch_rule_t, batch_rule_t batch_rule>
::std::vector<at::Tensor> _foreach_abs_generated_plumbing(at::TensorList self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::_foreach_abs::call(self);
  }

  auto results = batch_rule(self);
  return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
}
// vmap plumbing for in-place aten::_foreach_abs_: mutates `self` through the batch
// rule when batched at the current level; no outputs to rewrap.
template <typename batch_rule_t, batch_rule_t batch_rule>
void _foreach_abs__generated_plumbing(at::TensorList self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::_foreach_abs_::call(self);
  }

  batch_rule(self);
}
// vmap plumbing for aten::_foreach_acos: unary foreach op; calls the op directly when
// `self` is not batched at the current level, else runs the batch rule and rewraps.
template <typename batch_rule_t, batch_rule_t batch_rule>
::std::vector<at::Tensor> _foreach_acos_generated_plumbing(at::TensorList self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::_foreach_acos::call(self);
  }

  auto results = batch_rule(self);
  return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
}
// vmap plumbing for in-place aten::_foreach_acos_: mutates `self` through the batch
// rule when batched at the current level; no outputs to rewrap.
template <typename batch_rule_t, batch_rule_t batch_rule>
void _foreach_acos__generated_plumbing(at::TensorList self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::_foreach_acos_::call(self);
  }

  batch_rule(self);
}
// vmap plumbing for aten::_foreach_asin: unary foreach op; calls the op directly when
// `self` is not batched at the current level, else runs the batch rule and rewraps.
template <typename batch_rule_t, batch_rule_t batch_rule>
::std::vector<at::Tensor> _foreach_asin_generated_plumbing(at::TensorList self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::_foreach_asin::call(self);
  }

  auto results = batch_rule(self);
  return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
}
// vmap plumbing for in-place aten::_foreach_asin_: mutates `self` through the batch
// rule when batched at the current level; no outputs to rewrap.
template <typename batch_rule_t, batch_rule_t batch_rule>
void _foreach_asin__generated_plumbing(at::TensorList self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::_foreach_asin_::call(self);
  }

  batch_rule(self);
}
// vmap plumbing for aten::_foreach_atan: unary foreach op; calls the op directly when
// `self` is not batched at the current level, else runs the batch rule and rewraps.
template <typename batch_rule_t, batch_rule_t batch_rule>
::std::vector<at::Tensor> _foreach_atan_generated_plumbing(at::TensorList self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::_foreach_atan::call(self);
  }

  auto results = batch_rule(self);
  return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
}
// vmap plumbing for in-place aten::_foreach_atan_: mutates `self` through the batch
// rule when batched at the current level; no outputs to rewrap.
template <typename batch_rule_t, batch_rule_t batch_rule>
void _foreach_atan__generated_plumbing(at::TensorList self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::_foreach_atan_::call(self);
  }

  batch_rule(self);
}
// vmap plumbing for aten::_foreach_ceil: unary foreach op; calls the op directly when
// `self` is not batched at the current level, else runs the batch rule and rewraps.
template <typename batch_rule_t, batch_rule_t batch_rule>
::std::vector<at::Tensor> _foreach_ceil_generated_plumbing(at::TensorList self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::_foreach_ceil::call(self);
  }

  auto results = batch_rule(self);
  return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
}
// vmap plumbing for in-place aten::_foreach_ceil_: mutates `self` through the batch
// rule when batched at the current level; no outputs to rewrap.
template <typename batch_rule_t, batch_rule_t batch_rule>
void _foreach_ceil__generated_plumbing(at::TensorList self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::_foreach_ceil_::call(self);
  }

  batch_rule(self);
}
// vmap plumbing for aten::_foreach_cos: unary foreach op; calls the op directly when
// `self` is not batched at the current level, else runs the batch rule and rewraps.
template <typename batch_rule_t, batch_rule_t batch_rule>
::std::vector<at::Tensor> _foreach_cos_generated_plumbing(at::TensorList self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::_foreach_cos::call(self);
  }

  auto results = batch_rule(self);
  return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
}
// vmap plumbing for in-place aten::_foreach_cos_: mutates `self` through the batch
// rule when batched at the current level; no outputs to rewrap.
template <typename batch_rule_t, batch_rule_t batch_rule>
void _foreach_cos__generated_plumbing(at::TensorList self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::_foreach_cos_::call(self);
  }

  batch_rule(self);
}
// vmap plumbing for aten::_foreach_cosh: unary foreach op; calls the op directly when
// `self` is not batched at the current level, else runs the batch rule and rewraps.
template <typename batch_rule_t, batch_rule_t batch_rule>
::std::vector<at::Tensor> _foreach_cosh_generated_plumbing(at::TensorList self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::_foreach_cosh::call(self);
  }

  auto results = batch_rule(self);
  return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
}
// vmap plumbing for in-place aten::_foreach_cosh_: mutates `self` through the batch
// rule when batched at the current level; no outputs to rewrap.
template <typename batch_rule_t, batch_rule_t batch_rule>
void _foreach_cosh__generated_plumbing(at::TensorList self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::_foreach_cosh_::call(self);
  }

  batch_rule(self);
}
// vmap plumbing for aten::_foreach_erf: unary foreach op; calls the op directly when
// `self` is not batched at the current level, else runs the batch rule and rewraps.
template <typename batch_rule_t, batch_rule_t batch_rule>
::std::vector<at::Tensor> _foreach_erf_generated_plumbing(at::TensorList self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::_foreach_erf::call(self);
  }

  auto results = batch_rule(self);
  return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
}
// vmap plumbing for in-place aten::_foreach_erf_: mutates `self` through the batch
// rule when batched at the current level; no outputs to rewrap.
template <typename batch_rule_t, batch_rule_t batch_rule>
void _foreach_erf__generated_plumbing(at::TensorList self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::_foreach_erf_::call(self);
  }

  batch_rule(self);
}
// vmap plumbing for aten::_foreach_erfc: unary foreach op; calls the op directly when
// `self` is not batched at the current level, else runs the batch rule and rewraps.
template <typename batch_rule_t, batch_rule_t batch_rule>
::std::vector<at::Tensor> _foreach_erfc_generated_plumbing(at::TensorList self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::_foreach_erfc::call(self);
  }

  auto results = batch_rule(self);
  return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
}
// vmap plumbing for in-place aten::_foreach_erfc_: mutates `self` through the batch
// rule when batched at the current level; no outputs to rewrap.
template <typename batch_rule_t, batch_rule_t batch_rule>
void _foreach_erfc__generated_plumbing(at::TensorList self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::_foreach_erfc_::call(self);
  }

  batch_rule(self);
}
// vmap plumbing for aten::_foreach_exp: unary foreach op; calls the op directly when
// `self` is not batched at the current level, else runs the batch rule and rewraps.
template <typename batch_rule_t, batch_rule_t batch_rule>
::std::vector<at::Tensor> _foreach_exp_generated_plumbing(at::TensorList self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::_foreach_exp::call(self);
  }

  auto results = batch_rule(self);
  return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
}
// vmap plumbing for in-place aten::_foreach_exp_: mutates `self` through the batch
// rule when batched at the current level; no outputs to rewrap.
template <typename batch_rule_t, batch_rule_t batch_rule>
void _foreach_exp__generated_plumbing(at::TensorList self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::_foreach_exp_::call(self);
  }

  batch_rule(self);
}
// vmap plumbing for aten::_foreach_expm1: unary foreach op; calls the op directly when
// `self` is not batched at the current level, else runs the batch rule and rewraps.
template <typename batch_rule_t, batch_rule_t batch_rule>
::std::vector<at::Tensor> _foreach_expm1_generated_plumbing(at::TensorList self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::_foreach_expm1::call(self);
  }

  auto results = batch_rule(self);
  return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
}
// vmap plumbing for in-place aten::_foreach_expm1_: mutates `self` through the batch
// rule when batched at the current level; no outputs to rewrap.
template <typename batch_rule_t, batch_rule_t batch_rule>
void _foreach_expm1__generated_plumbing(at::TensorList self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::_foreach_expm1_::call(self);
  }

  batch_rule(self);
}
// vmap plumbing for aten::_foreach_floor: unary foreach op; calls the op directly when
// `self` is not batched at the current level, else runs the batch rule and rewraps.
template <typename batch_rule_t, batch_rule_t batch_rule>
::std::vector<at::Tensor> _foreach_floor_generated_plumbing(at::TensorList self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::_foreach_floor::call(self);
  }

  auto results = batch_rule(self);
  return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
}
// vmap plumbing for in-place aten::_foreach_floor_: mutates `self` through the batch
// rule when batched at the current level; no outputs to rewrap.
template <typename batch_rule_t, batch_rule_t batch_rule>
void _foreach_floor__generated_plumbing(at::TensorList self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::_foreach_floor_::call(self);
  }

  batch_rule(self);
}
// vmap plumbing for aten::_foreach_frac: unary foreach op; calls the op directly when
// `self` is not batched at the current level, else runs the batch rule and rewraps.
template <typename batch_rule_t, batch_rule_t batch_rule>
::std::vector<at::Tensor> _foreach_frac_generated_plumbing(at::TensorList self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::_foreach_frac::call(self);
  }

  auto results = batch_rule(self);
  return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
}
19581 template <typename batch_rule_t, batch_rule_t batch_rule>
19582 void _foreach_frac__generated_plumbing(at::TensorList self) {
19583   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
19584   auto maybe_layer = maybeCurrentDynamicLayer();
19585   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
19586   int64_t cur_level = maybe_layer->layerId();
19587   if (!isBatchedAtLevel(self, cur_level)) {
19588     return at::_ops::_foreach_frac_::call(self);
19589   }
19590 
19591   batch_rule(self);
19592 }
19593 template <typename batch_rule_t, batch_rule_t batch_rule>
19594 ::std::vector<at::Tensor> _foreach_lerp_List_generated_plumbing(at::TensorList self, at::TensorList tensors1, at::TensorList weights) {
19595   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
19596   auto maybe_layer = maybeCurrentDynamicLayer();
19597   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
19598   int64_t cur_level = maybe_layer->layerId();
19599   if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(tensors1, cur_level) && !isBatchedAtLevel(weights, cur_level)) {
19600     return at::_ops::_foreach_lerp_List::call(self, tensors1, weights);
19601   }
19602 
19603   auto results = batch_rule(self, tensors1, weights);
19604   return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
19605 }
19606 template <typename batch_rule_t, batch_rule_t batch_rule>
19607 void _foreach_lerp__List_generated_plumbing(at::TensorList self, at::TensorList tensors1, at::TensorList weights) {
19608   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
19609   auto maybe_layer = maybeCurrentDynamicLayer();
19610   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
19611   int64_t cur_level = maybe_layer->layerId();
19612   if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(tensors1, cur_level) && !isBatchedAtLevel(weights, cur_level)) {
19613     return at::_ops::_foreach_lerp__List::call(self, tensors1, weights);
19614   }
19615 
19616   batch_rule(self, tensors1, weights);
19617 }
19618 template <typename batch_rule_t, batch_rule_t batch_rule>
19619 ::std::vector<at::Tensor> _foreach_lerp_Scalar_generated_plumbing(at::TensorList self, at::TensorList tensors1, const at::Scalar & weight) {
19620   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
19621   auto maybe_layer = maybeCurrentDynamicLayer();
19622   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
19623   int64_t cur_level = maybe_layer->layerId();
19624   if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(tensors1, cur_level)) {
19625     return at::_ops::_foreach_lerp_Scalar::call(self, tensors1, weight);
19626   }
19627 
19628   auto results = batch_rule(self, tensors1, weight);
19629   return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
19630 }
19631 template <typename batch_rule_t, batch_rule_t batch_rule>
19632 void _foreach_lerp__Scalar_generated_plumbing(at::TensorList self, at::TensorList tensors1, const at::Scalar & weight) {
19633   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
19634   auto maybe_layer = maybeCurrentDynamicLayer();
19635   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
19636   int64_t cur_level = maybe_layer->layerId();
19637   if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(tensors1, cur_level)) {
19638     return at::_ops::_foreach_lerp__Scalar::call(self, tensors1, weight);
19639   }
19640 
19641   batch_rule(self, tensors1, weight);
19642 }
19643 template <typename batch_rule_t, batch_rule_t batch_rule>
19644 ::std::vector<at::Tensor> _foreach_lerp_ScalarList_generated_plumbing(at::TensorList self, at::TensorList tensors1, at::ArrayRef<at::Scalar> weight) {
19645   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
19646   auto maybe_layer = maybeCurrentDynamicLayer();
19647   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
19648   int64_t cur_level = maybe_layer->layerId();
19649   if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(tensors1, cur_level)) {
19650     return at::_ops::_foreach_lerp_ScalarList::call(self, tensors1, weight);
19651   }
19652 
19653   auto results = batch_rule(self, tensors1, weight);
19654   return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
19655 }
19656 template <typename batch_rule_t, batch_rule_t batch_rule>
19657 void _foreach_lerp__ScalarList_generated_plumbing(at::TensorList self, at::TensorList tensors1, at::ArrayRef<at::Scalar> weight) {
19658   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
19659   auto maybe_layer = maybeCurrentDynamicLayer();
19660   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
19661   int64_t cur_level = maybe_layer->layerId();
19662   if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(tensors1, cur_level)) {
19663     return at::_ops::_foreach_lerp__ScalarList::call(self, tensors1, weight);
19664   }
19665 
19666   batch_rule(self, tensors1, weight);
19667 }
19668 template <typename batch_rule_t, batch_rule_t batch_rule>
19669 ::std::vector<at::Tensor> _foreach_lgamma_generated_plumbing(at::TensorList self) {
19670   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
19671   auto maybe_layer = maybeCurrentDynamicLayer();
19672   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
19673   int64_t cur_level = maybe_layer->layerId();
19674   if (!isBatchedAtLevel(self, cur_level)) {
19675     return at::_ops::_foreach_lgamma::call(self);
19676   }
19677 
19678   auto results = batch_rule(self);
19679   return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
19680 }
19681 template <typename batch_rule_t, batch_rule_t batch_rule>
19682 void _foreach_lgamma__generated_plumbing(at::TensorList self) {
19683   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
19684   auto maybe_layer = maybeCurrentDynamicLayer();
19685   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
19686   int64_t cur_level = maybe_layer->layerId();
19687   if (!isBatchedAtLevel(self, cur_level)) {
19688     return at::_ops::_foreach_lgamma_::call(self);
19689   }
19690 
19691   batch_rule(self);
19692 }
19693 template <typename batch_rule_t, batch_rule_t batch_rule>
19694 ::std::vector<at::Tensor> _foreach_log_generated_plumbing(at::TensorList self) {
19695   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
19696   auto maybe_layer = maybeCurrentDynamicLayer();
19697   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
19698   int64_t cur_level = maybe_layer->layerId();
19699   if (!isBatchedAtLevel(self, cur_level)) {
19700     return at::_ops::_foreach_log::call(self);
19701   }
19702 
19703   auto results = batch_rule(self);
19704   return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
19705 }
19706 template <typename batch_rule_t, batch_rule_t batch_rule>
19707 void _foreach_log__generated_plumbing(at::TensorList self) {
19708   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
19709   auto maybe_layer = maybeCurrentDynamicLayer();
19710   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
19711   int64_t cur_level = maybe_layer->layerId();
19712   if (!isBatchedAtLevel(self, cur_level)) {
19713     return at::_ops::_foreach_log_::call(self);
19714   }
19715 
19716   batch_rule(self);
19717 }
19718 template <typename batch_rule_t, batch_rule_t batch_rule>
19719 ::std::vector<at::Tensor> _foreach_log10_generated_plumbing(at::TensorList self) {
19720   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
19721   auto maybe_layer = maybeCurrentDynamicLayer();
19722   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
19723   int64_t cur_level = maybe_layer->layerId();
19724   if (!isBatchedAtLevel(self, cur_level)) {
19725     return at::_ops::_foreach_log10::call(self);
19726   }
19727 
19728   auto results = batch_rule(self);
19729   return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
19730 }
19731 template <typename batch_rule_t, batch_rule_t batch_rule>
19732 void _foreach_log10__generated_plumbing(at::TensorList self) {
19733   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
19734   auto maybe_layer = maybeCurrentDynamicLayer();
19735   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
19736   int64_t cur_level = maybe_layer->layerId();
19737   if (!isBatchedAtLevel(self, cur_level)) {
19738     return at::_ops::_foreach_log10_::call(self);
19739   }
19740 
19741   batch_rule(self);
19742 }
19743 template <typename batch_rule_t, batch_rule_t batch_rule>
19744 ::std::vector<at::Tensor> _foreach_log1p_generated_plumbing(at::TensorList self) {
19745   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
19746   auto maybe_layer = maybeCurrentDynamicLayer();
19747   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
19748   int64_t cur_level = maybe_layer->layerId();
19749   if (!isBatchedAtLevel(self, cur_level)) {
19750     return at::_ops::_foreach_log1p::call(self);
19751   }
19752 
19753   auto results = batch_rule(self);
19754   return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
19755 }
19756 template <typename batch_rule_t, batch_rule_t batch_rule>
19757 void _foreach_log1p__generated_plumbing(at::TensorList self) {
19758   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
19759   auto maybe_layer = maybeCurrentDynamicLayer();
19760   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
19761   int64_t cur_level = maybe_layer->layerId();
19762   if (!isBatchedAtLevel(self, cur_level)) {
19763     return at::_ops::_foreach_log1p_::call(self);
19764   }
19765 
19766   batch_rule(self);
19767 }
19768 template <typename batch_rule_t, batch_rule_t batch_rule>
19769 ::std::vector<at::Tensor> _foreach_log2_generated_plumbing(at::TensorList self) {
19770   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
19771   auto maybe_layer = maybeCurrentDynamicLayer();
19772   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
19773   int64_t cur_level = maybe_layer->layerId();
19774   if (!isBatchedAtLevel(self, cur_level)) {
19775     return at::_ops::_foreach_log2::call(self);
19776   }
19777 
19778   auto results = batch_rule(self);
19779   return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
19780 }
19781 template <typename batch_rule_t, batch_rule_t batch_rule>
19782 void _foreach_log2__generated_plumbing(at::TensorList self) {
19783   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
19784   auto maybe_layer = maybeCurrentDynamicLayer();
19785   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
19786   int64_t cur_level = maybe_layer->layerId();
19787   if (!isBatchedAtLevel(self, cur_level)) {
19788     return at::_ops::_foreach_log2_::call(self);
19789   }
19790 
19791   batch_rule(self);
19792 }
19793 template <typename batch_rule_t, batch_rule_t batch_rule>
19794 ::std::vector<at::Tensor> _foreach_max_generated_plumbing(at::TensorList self) {
19795   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
19796   auto maybe_layer = maybeCurrentDynamicLayer();
19797   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
19798   int64_t cur_level = maybe_layer->layerId();
19799   if (!isBatchedAtLevel(self, cur_level)) {
19800     return at::_ops::_foreach_max::call(self);
19801   }
19802 
19803   auto results = batch_rule(self);
19804   return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
19805 }
19806 template <typename batch_rule_t, batch_rule_t batch_rule>
19807 ::std::vector<at::Tensor> _foreach_neg_generated_plumbing(at::TensorList self) {
19808   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
19809   auto maybe_layer = maybeCurrentDynamicLayer();
19810   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
19811   int64_t cur_level = maybe_layer->layerId();
19812   if (!isBatchedAtLevel(self, cur_level)) {
19813     return at::_ops::_foreach_neg::call(self);
19814   }
19815 
19816   auto results = batch_rule(self);
19817   return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
19818 }
19819 template <typename batch_rule_t, batch_rule_t batch_rule>
19820 void _foreach_neg__generated_plumbing(at::TensorList self) {
19821   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
19822   auto maybe_layer = maybeCurrentDynamicLayer();
19823   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
19824   int64_t cur_level = maybe_layer->layerId();
19825   if (!isBatchedAtLevel(self, cur_level)) {
19826     return at::_ops::_foreach_neg_::call(self);
19827   }
19828 
19829   batch_rule(self);
19830 }
19831 template <typename batch_rule_t, batch_rule_t batch_rule>
19832 ::std::vector<at::Tensor> _foreach_norm_Scalar_generated_plumbing(at::TensorList self, const at::Scalar & ord, ::std::optional<at::ScalarType> dtype) {
19833   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
19834   auto maybe_layer = maybeCurrentDynamicLayer();
19835   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
19836   int64_t cur_level = maybe_layer->layerId();
19837   if (!isBatchedAtLevel(self, cur_level)) {
19838     return at::_ops::_foreach_norm_Scalar::call(self, ord, dtype);
19839   }
19840 
19841   auto results = batch_rule(self, ord, dtype);
19842   return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
19843 }
19844 template <typename batch_rule_t, batch_rule_t batch_rule>
19845 ::std::vector<at::Tensor> _foreach_pow_List_generated_plumbing(at::TensorList self, at::TensorList exponent) {
19846   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
19847   auto maybe_layer = maybeCurrentDynamicLayer();
19848   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
19849   int64_t cur_level = maybe_layer->layerId();
19850   if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(exponent, cur_level)) {
19851     return at::_ops::_foreach_pow_List::call(self, exponent);
19852   }
19853 
19854   auto results = batch_rule(self, exponent);
19855   return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
19856 }
19857 template <typename batch_rule_t, batch_rule_t batch_rule>
19858 ::std::vector<at::Tensor> _foreach_pow_Scalar_generated_plumbing(at::TensorList self, const at::Scalar & exponent) {
19859   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
19860   auto maybe_layer = maybeCurrentDynamicLayer();
19861   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
19862   int64_t cur_level = maybe_layer->layerId();
19863   if (!isBatchedAtLevel(self, cur_level)) {
19864     return at::_ops::_foreach_pow_Scalar::call(self, exponent);
19865   }
19866 
19867   auto results = batch_rule(self, exponent);
19868   return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
19869 }
19870 template <typename batch_rule_t, batch_rule_t batch_rule>
19871 ::std::vector<at::Tensor> _foreach_pow_ScalarList_generated_plumbing(at::TensorList self, at::ArrayRef<at::Scalar> exponent) {
19872   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
19873   auto maybe_layer = maybeCurrentDynamicLayer();
19874   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
19875   int64_t cur_level = maybe_layer->layerId();
19876   if (!isBatchedAtLevel(self, cur_level)) {
19877     return at::_ops::_foreach_pow_ScalarList::call(self, exponent);
19878   }
19879 
19880   auto results = batch_rule(self, exponent);
19881   return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
19882 }
19883 template <typename batch_rule_t, batch_rule_t batch_rule>
19884 ::std::vector<at::Tensor> _foreach_pow_ScalarAndTensor_generated_plumbing(const at::Scalar & self, at::TensorList exponent) {
19885   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
19886   auto maybe_layer = maybeCurrentDynamicLayer();
19887   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
19888   int64_t cur_level = maybe_layer->layerId();
19889   if (!isBatchedAtLevel(exponent, cur_level)) {
19890     return at::_ops::_foreach_pow_ScalarAndTensor::call(self, exponent);
19891   }
19892 
19893   auto results = batch_rule(self, exponent);
19894   return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
19895 }
19896 template <typename batch_rule_t, batch_rule_t batch_rule>
19897 void _foreach_pow__List_generated_plumbing(at::TensorList self, at::TensorList exponent) {
19898   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
19899   auto maybe_layer = maybeCurrentDynamicLayer();
19900   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
19901   int64_t cur_level = maybe_layer->layerId();
19902   if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(exponent, cur_level)) {
19903     return at::_ops::_foreach_pow__List::call(self, exponent);
19904   }
19905 
19906   batch_rule(self, exponent);
19907 }
19908 template <typename batch_rule_t, batch_rule_t batch_rule>
19909 void _foreach_pow__Scalar_generated_plumbing(at::TensorList self, const at::Scalar & exponent) {
19910   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
19911   auto maybe_layer = maybeCurrentDynamicLayer();
19912   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
19913   int64_t cur_level = maybe_layer->layerId();
19914   if (!isBatchedAtLevel(self, cur_level)) {
19915     return at::_ops::_foreach_pow__Scalar::call(self, exponent);
19916   }
19917 
19918   batch_rule(self, exponent);
19919 }
19920 template <typename batch_rule_t, batch_rule_t batch_rule>
19921 void _foreach_pow__ScalarList_generated_plumbing(at::TensorList self, at::ArrayRef<at::Scalar> exponent) {
19922   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
19923   auto maybe_layer = maybeCurrentDynamicLayer();
19924   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
19925   int64_t cur_level = maybe_layer->layerId();
19926   if (!isBatchedAtLevel(self, cur_level)) {
19927     return at::_ops::_foreach_pow__ScalarList::call(self, exponent);
19928   }
19929 
19930   batch_rule(self, exponent);
19931 }
19932 template <typename batch_rule_t, batch_rule_t batch_rule>
19933 ::std::vector<at::Tensor> _foreach_reciprocal_generated_plumbing(at::TensorList self) {
19934   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
19935   auto maybe_layer = maybeCurrentDynamicLayer();
19936   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
19937   int64_t cur_level = maybe_layer->layerId();
19938   if (!isBatchedAtLevel(self, cur_level)) {
19939     return at::_ops::_foreach_reciprocal::call(self);
19940   }
19941 
19942   auto results = batch_rule(self);
19943   return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
19944 }
19945 template <typename batch_rule_t, batch_rule_t batch_rule>
19946 void _foreach_reciprocal__generated_plumbing(at::TensorList self) {
19947   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
19948   auto maybe_layer = maybeCurrentDynamicLayer();
19949   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
19950   int64_t cur_level = maybe_layer->layerId();
19951   if (!isBatchedAtLevel(self, cur_level)) {
19952     return at::_ops::_foreach_reciprocal_::call(self);
19953   }
19954 
19955   batch_rule(self);
19956 }
19957 template <typename batch_rule_t, batch_rule_t batch_rule>
19958 ::std::vector<at::Tensor> _foreach_round_generated_plumbing(at::TensorList self) {
19959   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
19960   auto maybe_layer = maybeCurrentDynamicLayer();
19961   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
19962   int64_t cur_level = maybe_layer->layerId();
19963   if (!isBatchedAtLevel(self, cur_level)) {
19964     return at::_ops::_foreach_round::call(self);
19965   }
19966 
19967   auto results = batch_rule(self);
19968   return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
19969 }
19970 template <typename batch_rule_t, batch_rule_t batch_rule>
19971 void _foreach_round__generated_plumbing(at::TensorList self) {
19972   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
19973   auto maybe_layer = maybeCurrentDynamicLayer();
19974   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
19975   int64_t cur_level = maybe_layer->layerId();
19976   if (!isBatchedAtLevel(self, cur_level)) {
19977     return at::_ops::_foreach_round_::call(self);
19978   }
19979 
19980   batch_rule(self);
19981 }
19982 template <typename batch_rule_t, batch_rule_t batch_rule>
19983 ::std::vector<at::Tensor> _foreach_rsqrt_generated_plumbing(at::TensorList self) {
19984   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
19985   auto maybe_layer = maybeCurrentDynamicLayer();
19986   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
19987   int64_t cur_level = maybe_layer->layerId();
19988   if (!isBatchedAtLevel(self, cur_level)) {
19989     return at::_ops::_foreach_rsqrt::call(self);
19990   }
19991 
19992   auto results = batch_rule(self);
19993   return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
19994 }
19995 template <typename batch_rule_t, batch_rule_t batch_rule>
19996 void _foreach_rsqrt__generated_plumbing(at::TensorList self) {
19997   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
19998   auto maybe_layer = maybeCurrentDynamicLayer();
19999   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
20000   int64_t cur_level = maybe_layer->layerId();
20001   if (!isBatchedAtLevel(self, cur_level)) {
20002     return at::_ops::_foreach_rsqrt_::call(self);
20003   }
20004 
20005   batch_rule(self);
20006 }
20007 template <typename batch_rule_t, batch_rule_t batch_rule>
20008 ::std::vector<at::Tensor> _foreach_sigmoid_generated_plumbing(at::TensorList self) {
20009   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
20010   auto maybe_layer = maybeCurrentDynamicLayer();
20011   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
20012   int64_t cur_level = maybe_layer->layerId();
20013   if (!isBatchedAtLevel(self, cur_level)) {
20014     return at::_ops::_foreach_sigmoid::call(self);
20015   }
20016 
20017   auto results = batch_rule(self);
20018   return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
20019 }
20020 template <typename batch_rule_t, batch_rule_t batch_rule>
20021 void _foreach_sigmoid__generated_plumbing(at::TensorList self) {
20022   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
20023   auto maybe_layer = maybeCurrentDynamicLayer();
20024   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
20025   int64_t cur_level = maybe_layer->layerId();
20026   if (!isBatchedAtLevel(self, cur_level)) {
20027     return at::_ops::_foreach_sigmoid_::call(self);
20028   }
20029 
20030   batch_rule(self);
20031 }
20032 template <typename batch_rule_t, batch_rule_t batch_rule>
20033 ::std::vector<at::Tensor> _foreach_sign_generated_plumbing(at::TensorList self) {
20034   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
20035   auto maybe_layer = maybeCurrentDynamicLayer();
20036   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
20037   int64_t cur_level = maybe_layer->layerId();
20038   if (!isBatchedAtLevel(self, cur_level)) {
20039     return at::_ops::_foreach_sign::call(self);
20040   }
20041 
20042   auto results = batch_rule(self);
20043   return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
20044 }
20045 template <typename batch_rule_t, batch_rule_t batch_rule>
20046 void _foreach_sign__generated_plumbing(at::TensorList self) {
20047   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
20048   auto maybe_layer = maybeCurrentDynamicLayer();
20049   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
20050   int64_t cur_level = maybe_layer->layerId();
20051   if (!isBatchedAtLevel(self, cur_level)) {
20052     return at::_ops::_foreach_sign_::call(self);
20053   }
20054 
20055   batch_rule(self);
20056 }
20057 template <typename batch_rule_t, batch_rule_t batch_rule>
20058 ::std::vector<at::Tensor> _foreach_sin_generated_plumbing(at::TensorList self) {
20059   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
20060   auto maybe_layer = maybeCurrentDynamicLayer();
20061   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
20062   int64_t cur_level = maybe_layer->layerId();
20063   if (!isBatchedAtLevel(self, cur_level)) {
20064     return at::_ops::_foreach_sin::call(self);
20065   }
20066 
20067   auto results = batch_rule(self);
20068   return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
20069 }
20070 template <typename batch_rule_t, batch_rule_t batch_rule>
20071 void _foreach_sin__generated_plumbing(at::TensorList self) {
20072   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
20073   auto maybe_layer = maybeCurrentDynamicLayer();
20074   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
20075   int64_t cur_level = maybe_layer->layerId();
20076   if (!isBatchedAtLevel(self, cur_level)) {
20077     return at::_ops::_foreach_sin_::call(self);
20078   }
20079 
20080   batch_rule(self);
20081 }
20082 template <typename batch_rule_t, batch_rule_t batch_rule>
20083 ::std::vector<at::Tensor> _foreach_sinh_generated_plumbing(at::TensorList self) {
20084   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
20085   auto maybe_layer = maybeCurrentDynamicLayer();
20086   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
20087   int64_t cur_level = maybe_layer->layerId();
20088   if (!isBatchedAtLevel(self, cur_level)) {
20089     return at::_ops::_foreach_sinh::call(self);
20090   }
20091 
20092   auto results = batch_rule(self);
20093   return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
20094 }
20095 template <typename batch_rule_t, batch_rule_t batch_rule>
20096 void _foreach_sinh__generated_plumbing(at::TensorList self) {
20097   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
20098   auto maybe_layer = maybeCurrentDynamicLayer();
20099   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
20100   int64_t cur_level = maybe_layer->layerId();
20101   if (!isBatchedAtLevel(self, cur_level)) {
20102     return at::_ops::_foreach_sinh_::call(self);
20103   }
20104 
20105   batch_rule(self);
20106 }
20107 template <typename batch_rule_t, batch_rule_t batch_rule>
20108 ::std::vector<at::Tensor> _foreach_sqrt_generated_plumbing(at::TensorList self) {
20109   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
20110   auto maybe_layer = maybeCurrentDynamicLayer();
20111   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
20112   int64_t cur_level = maybe_layer->layerId();
20113   if (!isBatchedAtLevel(self, cur_level)) {
20114     return at::_ops::_foreach_sqrt::call(self);
20115   }
20116 
20117   auto results = batch_rule(self);
20118   return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
20119 }
20120 template <typename batch_rule_t, batch_rule_t batch_rule>
20121 void _foreach_sqrt__generated_plumbing(at::TensorList self) {
20122   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
20123   auto maybe_layer = maybeCurrentDynamicLayer();
20124   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
20125   int64_t cur_level = maybe_layer->layerId();
20126   if (!isBatchedAtLevel(self, cur_level)) {
20127     return at::_ops::_foreach_sqrt_::call(self);
20128   }
20129 
20130   batch_rule(self);
20131 }
20132 template <typename batch_rule_t, batch_rule_t batch_rule>
20133 ::std::vector<at::Tensor> _foreach_tan_generated_plumbing(at::TensorList self) {
20134   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
20135   auto maybe_layer = maybeCurrentDynamicLayer();
20136   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
20137   int64_t cur_level = maybe_layer->layerId();
20138   if (!isBatchedAtLevel(self, cur_level)) {
20139     return at::_ops::_foreach_tan::call(self);
20140   }
20141 
20142   auto results = batch_rule(self);
20143   return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
20144 }
20145 template <typename batch_rule_t, batch_rule_t batch_rule>
20146 void _foreach_tan__generated_plumbing(at::TensorList self) {
20147   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
20148   auto maybe_layer = maybeCurrentDynamicLayer();
20149   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
20150   int64_t cur_level = maybe_layer->layerId();
20151   if (!isBatchedAtLevel(self, cur_level)) {
20152     return at::_ops::_foreach_tan_::call(self);
20153   }
20154 
20155   batch_rule(self);
20156 }
20157 template <typename batch_rule_t, batch_rule_t batch_rule>
20158 ::std::vector<at::Tensor> _foreach_tanh_generated_plumbing(at::TensorList self) {
20159   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
20160   auto maybe_layer = maybeCurrentDynamicLayer();
20161   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
20162   int64_t cur_level = maybe_layer->layerId();
20163   if (!isBatchedAtLevel(self, cur_level)) {
20164     return at::_ops::_foreach_tanh::call(self);
20165   }
20166 
20167   auto results = batch_rule(self);
20168   return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
20169 }
20170 template <typename batch_rule_t, batch_rule_t batch_rule>
20171 void _foreach_tanh__generated_plumbing(at::TensorList self) {
20172   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
20173   auto maybe_layer = maybeCurrentDynamicLayer();
20174   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
20175   int64_t cur_level = maybe_layer->layerId();
20176   if (!isBatchedAtLevel(self, cur_level)) {
20177     return at::_ops::_foreach_tanh_::call(self);
20178   }
20179 
20180   batch_rule(self);
20181 }
20182 template <typename batch_rule_t, batch_rule_t batch_rule>
20183 ::std::vector<at::Tensor> _foreach_trunc_generated_plumbing(at::TensorList self) {
20184   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
20185   auto maybe_layer = maybeCurrentDynamicLayer();
20186   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
20187   int64_t cur_level = maybe_layer->layerId();
20188   if (!isBatchedAtLevel(self, cur_level)) {
20189     return at::_ops::_foreach_trunc::call(self);
20190   }
20191 
20192   auto results = batch_rule(self);
20193   return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
20194 }
// vmap plumbing for the in-place foreach op `_foreach_trunc_`. Redispatches to
// the plain kernel when `self` has no tensors batched at the current level;
// otherwise invokes the batch rule on the TensorList. No return value.
template <typename batch_rule_t, batch_rule_t batch_rule>
void _foreach_trunc__generated_plumbing(at::TensorList self) {
  // Exclude FuncTorchBatched for the duration of this call.
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::_foreach_trunc_::call(self);
  }

  batch_rule(self);
}
// vmap plumbing for the in-place foreach op `_foreach_zero_`. Same shape as
// the other no-return foreach plumbings: level check, then either redispatch
// or call the batch rule directly on the TensorList.
template <typename batch_rule_t, batch_rule_t batch_rule>
void _foreach_zero__generated_plumbing(at::TensorList self) {
  // Exclude FuncTorchBatched for the duration of this call.
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::_foreach_zero_::call(self);
  }

  batch_rule(self);
}
// vmap plumbing for the in-place op `_foreach_copy_`. Redispatches to the
// regular kernel only when *neither* `self` nor `src` has a tensor batched at
// the current level; otherwise forwards both lists (plus `non_blocking`) to
// the batch rule. No return value.
template <typename batch_rule_t, batch_rule_t batch_rule>
void _foreach_copy__generated_plumbing(at::TensorList self, at::TensorList src, bool non_blocking) {
  // Exclude FuncTorchBatched for the duration of this call.
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(src, cur_level)) {
    return at::_ops::_foreach_copy_::call(self, src, non_blocking);
  }

  batch_rule(self, src, non_blocking);
}
// vmap plumbing for the out-of-place `_foreach_copy`. When either list is
// batched at the current level, runs the batch rule and re-wraps the returned
// vector of values/bdims into BatchedTensors at this level.
template <typename batch_rule_t, batch_rule_t batch_rule>
::std::vector<at::Tensor> _foreach_copy_generated_plumbing(at::TensorList self, at::TensorList src, bool non_blocking) {
  // Exclude FuncTorchBatched for the duration of this call.
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(src, cur_level)) {
    return at::_ops::_foreach_copy::call(self, src, non_blocking);
  }

  auto results = batch_rule(self, src, non_blocking);
  return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
}
// vmap plumbing for `bucketize.Tensor`. Unwraps `self` and `boundaries` into
// (value, batch-dim) pairs at the current level, calls the batch rule, and
// re-wraps the single tensor result.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor bucketize_Tensor_generated_plumbing(const at::Tensor & self, const at::Tensor & boundaries, bool out_int32, bool right) {
  // Exclude FuncTorchBatched for the duration of this call.
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(boundaries, cur_level)) {
    return at::_ops::bucketize_Tensor::call(self, boundaries, out_int32, right);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto [boundaries_value, boundaries_bdim] = unwrapTensorAtLevel(boundaries, cur_level);
  auto results = batch_rule(self_value, self_bdim, boundaries_value, boundaries_bdim, out_int32, right);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// vmap plumbing for `bucketize.Scalar`. `self` is a Scalar (never batched), so
// only `boundaries` is checked/unwrapped; the Scalar is passed through as-is.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor bucketize_Scalar_generated_plumbing(const at::Scalar & self, const at::Tensor & boundaries, bool out_int32, bool right) {
  // Exclude FuncTorchBatched for the duration of this call.
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(boundaries, cur_level)) {
    return at::_ops::bucketize_Scalar::call(self, boundaries, out_int32, right);
  }
  auto [boundaries_value, boundaries_bdim] = unwrapTensorAtLevel(boundaries, cur_level);
  auto results = batch_rule(self, boundaries_value, boundaries_bdim, out_int32, right);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// vmap plumbing for `searchsorted.Tensor`. Unwraps `sorted_sequence` and
// `self`; `sorter` is an optional tensor and is unwrapped into a separate
// optional (value, bdim) pair only when it holds a value.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor searchsorted_Tensor_generated_plumbing(const at::Tensor & sorted_sequence, const at::Tensor & self, bool out_int32, bool right, ::std::optional<c10::string_view> side, const ::std::optional<at::Tensor> & sorter) {
  // Exclude FuncTorchBatched for the duration of this call.
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(sorted_sequence, cur_level) && !isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(sorter, cur_level)) {
    return at::_ops::searchsorted_Tensor::call(sorted_sequence, self, out_int32, right, side, sorter);
  }
  auto [sorted_sequence_value, sorted_sequence_bdim] = unwrapTensorAtLevel(sorted_sequence, cur_level);
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  // Optional `sorter`: unwrap only when present; otherwise both stay nullopt.
  std::optional<Tensor> sorter_value;
  std::optional<int64_t> sorter_bdim;
  if (sorter) {
      std::tie(sorter_value, sorter_bdim) = unwrapTensorAtLevel(sorter.value(), cur_level);
  }
  auto results = batch_rule(sorted_sequence_value, sorted_sequence_bdim, self_value, self_bdim, out_int32, right, side, sorter_value, sorter_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// vmap plumbing for `searchsorted.Scalar`. `self` is a Scalar and passes
// through unchanged; `sorted_sequence` is unwrapped, and the optional
// `sorter` tensor is unwrapped only when present.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor searchsorted_Scalar_generated_plumbing(const at::Tensor & sorted_sequence, const at::Scalar & self, bool out_int32, bool right, ::std::optional<c10::string_view> side, const ::std::optional<at::Tensor> & sorter) {
  // Exclude FuncTorchBatched for the duration of this call.
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(sorted_sequence, cur_level) && !isBatchedAtLevel(sorter, cur_level)) {
    return at::_ops::searchsorted_Scalar::call(sorted_sequence, self, out_int32, right, side, sorter);
  }
  auto [sorted_sequence_value, sorted_sequence_bdim] = unwrapTensorAtLevel(sorted_sequence, cur_level);
  // Optional `sorter`: unwrap only when present; otherwise both stay nullopt.
  std::optional<Tensor> sorter_value;
  std::optional<int64_t> sorter_bdim;
  if (sorter) {
      std::tie(sorter_value, sorter_bdim) = unwrapTensorAtLevel(sorter.value(), cur_level);
  }
  auto results = batch_rule(sorted_sequence_value, sorted_sequence_bdim, self, out_int32, right, side, sorter_value, sorter_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// vmap plumbing for `_convert_indices_from_coo_to_csr`: single-tensor unwrap,
// batch-rule call, single-tensor re-wrap.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor _convert_indices_from_coo_to_csr_generated_plumbing(const at::Tensor & self, int64_t size, bool out_int32) {
  // Exclude FuncTorchBatched for the duration of this call.
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::_convert_indices_from_coo_to_csr::call(self, size, out_int32);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, size, out_int32);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// vmap plumbing for `_convert_indices_from_csr_to_coo`: unwraps both index
// tensors, calls the batch rule, and re-wraps the single result.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor _convert_indices_from_csr_to_coo_generated_plumbing(const at::Tensor & crow_indices, const at::Tensor & col_indices, bool out_int32, bool transpose) {
  // Exclude FuncTorchBatched for the duration of this call.
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(crow_indices, cur_level) && !isBatchedAtLevel(col_indices, cur_level)) {
    return at::_ops::_convert_indices_from_csr_to_coo::call(crow_indices, col_indices, out_int32, transpose);
  }
  auto [crow_indices_value, crow_indices_bdim] = unwrapTensorAtLevel(crow_indices, cur_level);
  auto [col_indices_value, col_indices_bdim] = unwrapTensorAtLevel(col_indices, cur_level);
  auto results = batch_rule(crow_indices_value, crow_indices_bdim, col_indices_value, col_indices_bdim, out_int32, transpose);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// vmap plumbing for `mse_loss`: unwrap `self`/`target`, run the batch rule,
// re-wrap the loss tensor at the current level.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor mse_loss_generated_plumbing(const at::Tensor & self, const at::Tensor & target, int64_t reduction) {
  // Exclude FuncTorchBatched for the duration of this call.
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(target, cur_level)) {
    return at::_ops::mse_loss::call(self, target, reduction);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto [target_value, target_bdim] = unwrapTensorAtLevel(target, cur_level);
  auto results = batch_rule(self_value, self_bdim, target_value, target_bdim, reduction);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// vmap plumbing for `mse_loss_backward`: three-tensor unwrap
// (grad_output/self/target), batch-rule call, single-tensor re-wrap.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor mse_loss_backward_generated_plumbing(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, int64_t reduction) {
  // Exclude FuncTorchBatched for the duration of this call.
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(target, cur_level)) {
    return at::_ops::mse_loss_backward::call(grad_output, self, target, reduction);
  }
  auto [grad_output_value, grad_output_bdim] = unwrapTensorAtLevel(grad_output, cur_level);
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto [target_value, target_bdim] = unwrapTensorAtLevel(target, cur_level);
  auto results = batch_rule(grad_output_value, grad_output_bdim, self_value, self_bdim, target_value, target_bdim, reduction);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// vmap plumbing for `l1_loss`: same two-tensor unwrap/re-wrap pattern as
// `mse_loss` above.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor l1_loss_generated_plumbing(const at::Tensor & self, const at::Tensor & target, int64_t reduction) {
  // Exclude FuncTorchBatched for the duration of this call.
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(target, cur_level)) {
    return at::_ops::l1_loss::call(self, target, reduction);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto [target_value, target_bdim] = unwrapTensorAtLevel(target, cur_level);
  auto results = batch_rule(self_value, self_bdim, target_value, target_bdim, reduction);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// vmap plumbing for `multi_margin_loss`. Unwraps `self`/`target`; the
// optional `weight` tensor is unwrapped into optional (value, bdim) slots
// only when present. Scalars `p`/`margin` pass through unchanged.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor multi_margin_loss_generated_plumbing(const at::Tensor & self, const at::Tensor & target, const at::Scalar & p, const at::Scalar & margin, const ::std::optional<at::Tensor> & weight, int64_t reduction) {
  // Exclude FuncTorchBatched for the duration of this call.
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(target, cur_level) && !isBatchedAtLevel(weight, cur_level)) {
    return at::_ops::multi_margin_loss::call(self, target, p, margin, weight, reduction);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto [target_value, target_bdim] = unwrapTensorAtLevel(target, cur_level);
  // Optional `weight`: unwrap only when present; otherwise both stay nullopt.
  std::optional<Tensor> weight_value;
  std::optional<int64_t> weight_bdim;
  if (weight) {
      std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight.value(), cur_level);
  }
  auto results = batch_rule(self_value, self_bdim, target_value, target_bdim, p, margin, weight_value, weight_bdim, reduction);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// vmap plumbing for `multi_margin_loss_backward`. Unwraps grad_output/self/
// target plus the optional `weight` (only when present), then re-wraps the
// single gradient tensor.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor multi_margin_loss_backward_generated_plumbing(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, const at::Scalar & p, const at::Scalar & margin, const ::std::optional<at::Tensor> & weight, int64_t reduction) {
  // Exclude FuncTorchBatched for the duration of this call.
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(target, cur_level) && !isBatchedAtLevel(weight, cur_level)) {
    return at::_ops::multi_margin_loss_backward::call(grad_output, self, target, p, margin, weight, reduction);
  }
  auto [grad_output_value, grad_output_bdim] = unwrapTensorAtLevel(grad_output, cur_level);
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto [target_value, target_bdim] = unwrapTensorAtLevel(target, cur_level);
  // Optional `weight`: unwrap only when present; otherwise both stay nullopt.
  std::optional<Tensor> weight_value;
  std::optional<int64_t> weight_bdim;
  if (weight) {
      std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight.value(), cur_level);
  }
  auto results = batch_rule(grad_output_value, grad_output_bdim, self_value, self_bdim, target_value, target_bdim, p, margin, weight_value, weight_bdim, reduction);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// vmap plumbing for `multilabel_margin_loss`: two-tensor unwrap, batch-rule
// call, single-tensor re-wrap.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor multilabel_margin_loss_generated_plumbing(const at::Tensor & self, const at::Tensor & target, int64_t reduction) {
  // Exclude FuncTorchBatched for the duration of this call.
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(target, cur_level)) {
    return at::_ops::multilabel_margin_loss::call(self, target, reduction);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto [target_value, target_bdim] = unwrapTensorAtLevel(target, cur_level);
  auto results = batch_rule(self_value, self_bdim, target_value, target_bdim, reduction);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// vmap plumbing for `multilabel_margin_loss_forward`. The batch rule returns
// two (value, bdim) pairs — slots (0,1) and (2,3) — which are re-wrapped into
// the op's two tensor outputs.
template <typename batch_rule_t, batch_rule_t batch_rule>
::std::tuple<at::Tensor,at::Tensor> multilabel_margin_loss_forward_generated_plumbing(const at::Tensor & self, const at::Tensor & target, int64_t reduction) {
  // Exclude FuncTorchBatched for the duration of this call.
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(target, cur_level)) {
    return at::_ops::multilabel_margin_loss_forward::call(self, target, reduction);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto [target_value, target_bdim] = unwrapTensorAtLevel(target, cur_level);
  auto results = batch_rule(self_value, self_bdim, target_value, target_bdim, reduction);
  return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
}
// vmap plumbing for `multilabel_margin_loss_backward`: four-tensor unwrap
// (grad_output/self/target/is_target), batch-rule call, single re-wrap.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor multilabel_margin_loss_backward_generated_plumbing(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, int64_t reduction, const at::Tensor & is_target) {
  // Exclude FuncTorchBatched for the duration of this call.
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(target, cur_level) && !isBatchedAtLevel(is_target, cur_level)) {
    return at::_ops::multilabel_margin_loss_backward::call(grad_output, self, target, reduction, is_target);
  }
  auto [grad_output_value, grad_output_bdim] = unwrapTensorAtLevel(grad_output, cur_level);
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto [target_value, target_bdim] = unwrapTensorAtLevel(target, cur_level);
  auto [is_target_value, is_target_bdim] = unwrapTensorAtLevel(is_target, cur_level);
  auto results = batch_rule(grad_output_value, grad_output_bdim, self_value, self_bdim, target_value, target_bdim, reduction, is_target_value, is_target_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// vmap plumbing for `nll_loss_nd`. Unwraps `self`/`target`; the optional
// class `weight` tensor is unwrapped only when present. `reduction` and
// `ignore_index` pass through unchanged.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor nll_loss_nd_generated_plumbing(const at::Tensor & self, const at::Tensor & target, const ::std::optional<at::Tensor> & weight, int64_t reduction, c10::SymInt ignore_index) {
  // Exclude FuncTorchBatched for the duration of this call.
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(target, cur_level) && !isBatchedAtLevel(weight, cur_level)) {
    return at::_ops::nll_loss_nd::call(self, target, weight, reduction, ignore_index);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto [target_value, target_bdim] = unwrapTensorAtLevel(target, cur_level);
  // Optional `weight`: unwrap only when present; otherwise both stay nullopt.
  std::optional<Tensor> weight_value;
  std::optional<int64_t> weight_bdim;
  if (weight) {
      std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight.value(), cur_level);
  }
  auto results = batch_rule(self_value, self_bdim, target_value, target_bdim, weight_value, weight_bdim, reduction, ignore_index);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// vmap plumbing for `nll_loss`. Identical shape to `nll_loss_nd`: unwrap
// `self`/`target`, optionally unwrap `weight`, call the batch rule, re-wrap.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor nll_loss_generated_plumbing(const at::Tensor & self, const at::Tensor & target, const ::std::optional<at::Tensor> & weight, int64_t reduction, c10::SymInt ignore_index) {
  // Exclude FuncTorchBatched for the duration of this call.
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(target, cur_level) && !isBatchedAtLevel(weight, cur_level)) {
    return at::_ops::nll_loss::call(self, target, weight, reduction, ignore_index);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto [target_value, target_bdim] = unwrapTensorAtLevel(target, cur_level);
  // Optional `weight`: unwrap only when present; otherwise both stay nullopt.
  std::optional<Tensor> weight_value;
  std::optional<int64_t> weight_bdim;
  if (weight) {
      std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight.value(), cur_level);
  }
  auto results = batch_rule(self_value, self_bdim, target_value, target_bdim, weight_value, weight_bdim, reduction, ignore_index);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// vmap plumbing for `nll_loss_forward`. Two tensor outputs: the batch rule's
// result slots (0,1) and (2,3) are re-wrapped into the returned tuple.
// Optional `weight` is unwrapped only when present.
template <typename batch_rule_t, batch_rule_t batch_rule>
::std::tuple<at::Tensor,at::Tensor> nll_loss_forward_generated_plumbing(const at::Tensor & self, const at::Tensor & target, const ::std::optional<at::Tensor> & weight, int64_t reduction, c10::SymInt ignore_index) {
  // Exclude FuncTorchBatched for the duration of this call.
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(target, cur_level) && !isBatchedAtLevel(weight, cur_level)) {
    return at::_ops::nll_loss_forward::call(self, target, weight, reduction, ignore_index);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto [target_value, target_bdim] = unwrapTensorAtLevel(target, cur_level);
  // Optional `weight`: unwrap only when present; otherwise both stay nullopt.
  std::optional<Tensor> weight_value;
  std::optional<int64_t> weight_bdim;
  if (weight) {
      std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight.value(), cur_level);
  }
  auto results = batch_rule(self_value, self_bdim, target_value, target_bdim, weight_value, weight_bdim, reduction, ignore_index);
  return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
}
// vmap plumbing for `nll_loss_backward`. Unwraps grad_output/self/target/
// total_weight; the optional `weight` is unwrapped afterwards, only when
// present. Re-wraps the single gradient tensor.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor nll_loss_backward_generated_plumbing(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, const ::std::optional<at::Tensor> & weight, int64_t reduction, c10::SymInt ignore_index, const at::Tensor & total_weight) {
  // Exclude FuncTorchBatched for the duration of this call.
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(target, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(total_weight, cur_level)) {
    return at::_ops::nll_loss_backward::call(grad_output, self, target, weight, reduction, ignore_index, total_weight);
  }
  auto [grad_output_value, grad_output_bdim] = unwrapTensorAtLevel(grad_output, cur_level);
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto [target_value, target_bdim] = unwrapTensorAtLevel(target, cur_level);
  auto [total_weight_value, total_weight_bdim] = unwrapTensorAtLevel(total_weight, cur_level);
  // Optional `weight`: unwrap only when present; otherwise both stay nullopt.
  std::optional<Tensor> weight_value;
  std::optional<int64_t> weight_bdim;
  if (weight) {
      std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight.value(), cur_level);
  }
  auto results = batch_rule(grad_output_value, grad_output_bdim, self_value, self_bdim, target_value, target_bdim, weight_value, weight_bdim, reduction, ignore_index, total_weight_value, total_weight_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// vmap plumbing for `nll_loss2d`. Same pattern as `nll_loss`: unwrap
// `self`/`target`, optionally unwrap `weight`, call the batch rule, re-wrap.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor nll_loss2d_generated_plumbing(const at::Tensor & self, const at::Tensor & target, const ::std::optional<at::Tensor> & weight, int64_t reduction, c10::SymInt ignore_index) {
  // Exclude FuncTorchBatched for the duration of this call.
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(target, cur_level) && !isBatchedAtLevel(weight, cur_level)) {
    return at::_ops::nll_loss2d::call(self, target, weight, reduction, ignore_index);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto [target_value, target_bdim] = unwrapTensorAtLevel(target, cur_level);
  // Optional `weight`: unwrap only when present; otherwise both stay nullopt.
  std::optional<Tensor> weight_value;
  std::optional<int64_t> weight_bdim;
  if (weight) {
      std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight.value(), cur_level);
  }
  auto results = batch_rule(self_value, self_bdim, target_value, target_bdim, weight_value, weight_bdim, reduction, ignore_index);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// vmap plumbing for `nll_loss2d_forward`. Two tensor outputs re-wrapped from
// batch-rule result slots (0,1) and (2,3); optional `weight` unwrapped only
// when present.
template <typename batch_rule_t, batch_rule_t batch_rule>
::std::tuple<at::Tensor,at::Tensor> nll_loss2d_forward_generated_plumbing(const at::Tensor & self, const at::Tensor & target, const ::std::optional<at::Tensor> & weight, int64_t reduction, c10::SymInt ignore_index) {
  // Exclude FuncTorchBatched for the duration of this call.
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(target, cur_level) && !isBatchedAtLevel(weight, cur_level)) {
    return at::_ops::nll_loss2d_forward::call(self, target, weight, reduction, ignore_index);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto [target_value, target_bdim] = unwrapTensorAtLevel(target, cur_level);
  // Optional `weight`: unwrap only when present; otherwise both stay nullopt.
  std::optional<Tensor> weight_value;
  std::optional<int64_t> weight_bdim;
  if (weight) {
      std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight.value(), cur_level);
  }
  auto results = batch_rule(self_value, self_bdim, target_value, target_bdim, weight_value, weight_bdim, reduction, ignore_index);
  return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
}
// vmap plumbing for `nll_loss2d_backward`. Unwraps grad_output/self/target/
// total_weight plus the optional `weight` (only when present); re-wraps the
// single gradient tensor.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor nll_loss2d_backward_generated_plumbing(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, const ::std::optional<at::Tensor> & weight, int64_t reduction, c10::SymInt ignore_index, const at::Tensor & total_weight) {
  // Exclude FuncTorchBatched for the duration of this call.
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(target, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(total_weight, cur_level)) {
    return at::_ops::nll_loss2d_backward::call(grad_output, self, target, weight, reduction, ignore_index, total_weight);
  }
  auto [grad_output_value, grad_output_bdim] = unwrapTensorAtLevel(grad_output, cur_level);
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto [target_value, target_bdim] = unwrapTensorAtLevel(target, cur_level);
  auto [total_weight_value, total_weight_bdim] = unwrapTensorAtLevel(total_weight, cur_level);
  // Optional `weight`: unwrap only when present; otherwise both stay nullopt.
  std::optional<Tensor> weight_value;
  std::optional<int64_t> weight_bdim;
  if (weight) {
      std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight.value(), cur_level);
  }
  auto results = batch_rule(grad_output_value, grad_output_bdim, self_value, self_bdim, target_value, target_bdim, weight_value, weight_bdim, reduction, ignore_index, total_weight_value, total_weight_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// vmap plumbing for `smooth_l1_loss`: unwrap `self`/`target`, pass
// `reduction`/`beta` through, re-wrap the single result.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor smooth_l1_loss_generated_plumbing(const at::Tensor & self, const at::Tensor & target, int64_t reduction, double beta) {
  // Exclude FuncTorchBatched for the duration of this call.
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(target, cur_level)) {
    return at::_ops::smooth_l1_loss::call(self, target, reduction, beta);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto [target_value, target_bdim] = unwrapTensorAtLevel(target, cur_level);
  auto results = batch_rule(self_value, self_bdim, target_value, target_bdim, reduction, beta);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// vmap plumbing for `smooth_l1_loss_backward`: three-tensor unwrap
// (grad_output/self/target), batch-rule call, single-tensor re-wrap.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor smooth_l1_loss_backward_generated_plumbing(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, int64_t reduction, double beta) {
  // Exclude FuncTorchBatched for the duration of this call.
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(target, cur_level)) {
    return at::_ops::smooth_l1_loss_backward::call(grad_output, self, target, reduction, beta);
  }
  auto [grad_output_value, grad_output_bdim] = unwrapTensorAtLevel(grad_output, cur_level);
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto [target_value, target_bdim] = unwrapTensorAtLevel(target, cur_level);
  auto results = batch_rule(grad_output_value, grad_output_bdim, self_value, self_bdim, target_value, target_bdim, reduction, beta);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// vmap plumbing for `huber_loss`: unwrap `self`/`target`, pass
// `reduction`/`delta` through, re-wrap the single result.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor huber_loss_generated_plumbing(const at::Tensor & self, const at::Tensor & target, int64_t reduction, double delta) {
  // Exclude FuncTorchBatched for the duration of this call.
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(target, cur_level)) {
    return at::_ops::huber_loss::call(self, target, reduction, delta);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto [target_value, target_bdim] = unwrapTensorAtLevel(target, cur_level);
  auto results = batch_rule(self_value, self_bdim, target_value, target_bdim, reduction, delta);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// vmap plumbing for `huber_loss_backward`: three-tensor unwrap
// (grad_output/self/target), batch-rule call, single-tensor re-wrap.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor huber_loss_backward_generated_plumbing(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, int64_t reduction, double delta) {
  // Exclude FuncTorchBatched for the duration of this call.
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(target, cur_level)) {
    return at::_ops::huber_loss_backward::call(grad_output, self, target, reduction, delta);
  }
  auto [grad_output_value, grad_output_bdim] = unwrapTensorAtLevel(grad_output, cur_level);
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto [target_value, target_bdim] = unwrapTensorAtLevel(target, cur_level);
  auto results = batch_rule(grad_output_value, grad_output_bdim, self_value, self_bdim, target_value, target_bdim, reduction, delta);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
20656 template <typename batch_rule_t, batch_rule_t batch_rule>
20657 at::Tensor soft_margin_loss_generated_plumbing(const at::Tensor & self, const at::Tensor & target, int64_t reduction) {
20658   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
20659   auto maybe_layer = maybeCurrentDynamicLayer();
20660   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
20661   int64_t cur_level = maybe_layer->layerId();
20662   if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(target, cur_level)) {
20663     return at::_ops::soft_margin_loss::call(self, target, reduction);
20664   }
20665   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
20666   auto [target_value, target_bdim] = unwrapTensorAtLevel(target, cur_level);
20667   auto results = batch_rule(self_value, self_bdim, target_value, target_bdim, reduction);
20668   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
20669 }
20670 template <typename batch_rule_t, batch_rule_t batch_rule>
20671 at::Tensor soft_margin_loss_backward_generated_plumbing(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, int64_t reduction) {
20672   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
20673   auto maybe_layer = maybeCurrentDynamicLayer();
20674   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
20675   int64_t cur_level = maybe_layer->layerId();
20676   if (!isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(target, cur_level)) {
20677     return at::_ops::soft_margin_loss_backward::call(grad_output, self, target, reduction);
20678   }
20679   auto [grad_output_value, grad_output_bdim] = unwrapTensorAtLevel(grad_output, cur_level);
20680   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
20681   auto [target_value, target_bdim] = unwrapTensorAtLevel(target, cur_level);
20682   auto results = batch_rule(grad_output_value, grad_output_bdim, self_value, self_bdim, target_value, target_bdim, reduction);
20683   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
20684 }
20685 template <typename batch_rule_t, batch_rule_t batch_rule>
20686 at::Tensor elu_generated_plumbing(const at::Tensor & self, const at::Scalar & alpha, const at::Scalar & scale, const at::Scalar & input_scale) {
20687   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
20688   auto maybe_layer = maybeCurrentDynamicLayer();
20689   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
20690   int64_t cur_level = maybe_layer->layerId();
20691   if (!isBatchedAtLevel(self, cur_level)) {
20692     return at::_ops::elu::call(self, alpha, scale, input_scale);
20693   }
20694   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
20695   auto results = batch_rule(self_value, self_bdim, alpha, scale, input_scale);
20696   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
20697 }
20698 template <typename batch_rule_t, batch_rule_t batch_rule>
20699 at::Tensor elu_backward_generated_plumbing(const at::Tensor & grad_output, const at::Scalar & alpha, const at::Scalar & scale, const at::Scalar & input_scale, bool is_result, const at::Tensor & self_or_result) {
20700   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
20701   auto maybe_layer = maybeCurrentDynamicLayer();
20702   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
20703   int64_t cur_level = maybe_layer->layerId();
20704   if (!isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(self_or_result, cur_level)) {
20705     return at::_ops::elu_backward::call(grad_output, alpha, scale, input_scale, is_result, self_or_result);
20706   }
20707   auto [grad_output_value, grad_output_bdim] = unwrapTensorAtLevel(grad_output, cur_level);
20708   auto [self_or_result_value, self_or_result_bdim] = unwrapTensorAtLevel(self_or_result, cur_level);
20709   auto results = batch_rule(grad_output_value, grad_output_bdim, alpha, scale, input_scale, is_result, self_or_result_value, self_or_result_bdim);
20710   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
20711 }
20712 template <typename batch_rule_t, batch_rule_t batch_rule>
20713 at::Tensor & elu__generated_plumbing(at::Tensor & self, const at::Scalar & alpha, const at::Scalar & scale, const at::Scalar & input_scale) {
20714   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
20715   auto maybe_layer = maybeCurrentDynamicLayer();
20716   vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
20717   int64_t cur_level = maybe_layer->layerId();
20718   if (!isBatchedAtLevel(self, cur_level)) {
20719     return at::_ops::elu_::call(self, alpha, scale, input_scale);
20720   }
20721   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
20722   batch_rule(self_value, self_bdim, alpha, scale, input_scale);
20723   return self;
20724 }
20725 template <typename batch_rule_t, batch_rule_t batch_rule>
20726 at::Tensor glu_generated_plumbing(const at::Tensor & self, int64_t dim) {
20727   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
20728   auto maybe_layer = maybeCurrentDynamicLayer();
20729   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
20730   int64_t cur_level = maybe_layer->layerId();
20731   if (!isBatchedAtLevel(self, cur_level)) {
20732     return at::_ops::glu::call(self, dim);
20733   }
20734   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
20735   auto results = batch_rule(self_value, self_bdim, dim);
20736   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
20737 }
20738 template <typename batch_rule_t, batch_rule_t batch_rule>
20739 at::Tensor glu_backward_generated_plumbing(const at::Tensor & grad_output, const at::Tensor & self, int64_t dim) {
20740   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
20741   auto maybe_layer = maybeCurrentDynamicLayer();
20742   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
20743   int64_t cur_level = maybe_layer->layerId();
20744   if (!isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(self, cur_level)) {
20745     return at::_ops::glu_backward::call(grad_output, self, dim);
20746   }
20747   auto [grad_output_value, grad_output_bdim] = unwrapTensorAtLevel(grad_output, cur_level);
20748   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
20749   auto results = batch_rule(grad_output_value, grad_output_bdim, self_value, self_bdim, dim);
20750   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
20751 }
20752 template <typename batch_rule_t, batch_rule_t batch_rule>
20753 at::Tensor glu_jvp_generated_plumbing(const at::Tensor & glu, const at::Tensor & x, const at::Tensor & dx, int64_t dim) {
20754   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
20755   auto maybe_layer = maybeCurrentDynamicLayer();
20756   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
20757   int64_t cur_level = maybe_layer->layerId();
20758   if (!isBatchedAtLevel(glu, cur_level) && !isBatchedAtLevel(x, cur_level) && !isBatchedAtLevel(dx, cur_level)) {
20759     return at::_ops::glu_jvp::call(glu, x, dx, dim);
20760   }
20761   auto [glu_value, glu_bdim] = unwrapTensorAtLevel(glu, cur_level);
20762   auto [x_value, x_bdim] = unwrapTensorAtLevel(x, cur_level);
20763   auto [dx_value, dx_bdim] = unwrapTensorAtLevel(dx, cur_level);
20764   auto results = batch_rule(glu_value, glu_bdim, x_value, x_bdim, dx_value, dx_bdim, dim);
20765   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
20766 }
20767 template <typename batch_rule_t, batch_rule_t batch_rule>
20768 at::Tensor glu_backward_jvp_generated_plumbing(const at::Tensor & grad_x, const at::Tensor & grad_glu, const at::Tensor & x, const at::Tensor & dgrad_glu, const at::Tensor & dx, int64_t dim) {
20769   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
20770   auto maybe_layer = maybeCurrentDynamicLayer();
20771   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
20772   int64_t cur_level = maybe_layer->layerId();
20773   if (!isBatchedAtLevel(grad_x, cur_level) && !isBatchedAtLevel(grad_glu, cur_level) && !isBatchedAtLevel(x, cur_level) && !isBatchedAtLevel(dgrad_glu, cur_level) && !isBatchedAtLevel(dx, cur_level)) {
20774     return at::_ops::glu_backward_jvp::call(grad_x, grad_glu, x, dgrad_glu, dx, dim);
20775   }
20776   auto [grad_x_value, grad_x_bdim] = unwrapTensorAtLevel(grad_x, cur_level);
20777   auto [grad_glu_value, grad_glu_bdim] = unwrapTensorAtLevel(grad_glu, cur_level);
20778   auto [x_value, x_bdim] = unwrapTensorAtLevel(x, cur_level);
20779   auto [dgrad_glu_value, dgrad_glu_bdim] = unwrapTensorAtLevel(dgrad_glu, cur_level);
20780   auto [dx_value, dx_bdim] = unwrapTensorAtLevel(dx, cur_level);
20781   auto results = batch_rule(grad_x_value, grad_x_bdim, grad_glu_value, grad_glu_bdim, x_value, x_bdim, dgrad_glu_value, dgrad_glu_bdim, dx_value, dx_bdim, dim);
20782   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
20783 }
20784 template <typename batch_rule_t, batch_rule_t batch_rule>
20785 at::Tensor hardsigmoid_generated_plumbing(const at::Tensor & self) {
20786   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
20787   auto maybe_layer = maybeCurrentDynamicLayer();
20788   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
20789   int64_t cur_level = maybe_layer->layerId();
20790   if (!isBatchedAtLevel(self, cur_level)) {
20791     return at::_ops::hardsigmoid::call(self);
20792   }
20793   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
20794   auto results = batch_rule(self_value, self_bdim);
20795   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
20796 }
20797 template <typename batch_rule_t, batch_rule_t batch_rule>
20798 at::Tensor & hardsigmoid__generated_plumbing(at::Tensor & self) {
20799   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
20800   auto maybe_layer = maybeCurrentDynamicLayer();
20801   vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
20802   int64_t cur_level = maybe_layer->layerId();
20803   if (!isBatchedAtLevel(self, cur_level)) {
20804     return at::_ops::hardsigmoid_::call(self);
20805   }
20806   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
20807   batch_rule(self_value, self_bdim);
20808   return self;
20809 }
20810 template <typename batch_rule_t, batch_rule_t batch_rule>
20811 at::Tensor hardsigmoid_backward_generated_plumbing(const at::Tensor & grad_output, const at::Tensor & self) {
20812   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
20813   auto maybe_layer = maybeCurrentDynamicLayer();
20814   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
20815   int64_t cur_level = maybe_layer->layerId();
20816   if (!isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(self, cur_level)) {
20817     return at::_ops::hardsigmoid_backward::call(grad_output, self);
20818   }
20819   auto [grad_output_value, grad_output_bdim] = unwrapTensorAtLevel(grad_output, cur_level);
20820   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
20821   auto results = batch_rule(grad_output_value, grad_output_bdim, self_value, self_bdim);
20822   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
20823 }
20824 template <typename batch_rule_t, batch_rule_t batch_rule>
20825 at::Tensor hardtanh_generated_plumbing(const at::Tensor & self, const at::Scalar & min_val, const at::Scalar & max_val) {
20826   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
20827   auto maybe_layer = maybeCurrentDynamicLayer();
20828   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
20829   int64_t cur_level = maybe_layer->layerId();
20830   if (!isBatchedAtLevel(self, cur_level)) {
20831     return at::_ops::hardtanh::call(self, min_val, max_val);
20832   }
20833   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
20834   auto results = batch_rule(self_value, self_bdim, min_val, max_val);
20835   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
20836 }
20837 template <typename batch_rule_t, batch_rule_t batch_rule>
20838 at::Tensor hardtanh_backward_generated_plumbing(const at::Tensor & grad_output, const at::Tensor & self, const at::Scalar & min_val, const at::Scalar & max_val) {
20839   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
20840   auto maybe_layer = maybeCurrentDynamicLayer();
20841   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
20842   int64_t cur_level = maybe_layer->layerId();
20843   if (!isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(self, cur_level)) {
20844     return at::_ops::hardtanh_backward::call(grad_output, self, min_val, max_val);
20845   }
20846   auto [grad_output_value, grad_output_bdim] = unwrapTensorAtLevel(grad_output, cur_level);
20847   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
20848   auto results = batch_rule(grad_output_value, grad_output_bdim, self_value, self_bdim, min_val, max_val);
20849   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
20850 }
20851 template <typename batch_rule_t, batch_rule_t batch_rule>
20852 at::Tensor & hardtanh__generated_plumbing(at::Tensor & self, const at::Scalar & min_val, const at::Scalar & max_val) {
20853   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
20854   auto maybe_layer = maybeCurrentDynamicLayer();
20855   vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
20856   int64_t cur_level = maybe_layer->layerId();
20857   if (!isBatchedAtLevel(self, cur_level)) {
20858     return at::_ops::hardtanh_::call(self, min_val, max_val);
20859   }
20860   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
20861   batch_rule(self_value, self_bdim, min_val, max_val);
20862   return self;
20863 }
20864 template <typename batch_rule_t, batch_rule_t batch_rule>
20865 at::Tensor hardswish_generated_plumbing(const at::Tensor & self) {
20866   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
20867   auto maybe_layer = maybeCurrentDynamicLayer();
20868   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
20869   int64_t cur_level = maybe_layer->layerId();
20870   if (!isBatchedAtLevel(self, cur_level)) {
20871     return at::_ops::hardswish::call(self);
20872   }
20873   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
20874   auto results = batch_rule(self_value, self_bdim);
20875   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
20876 }
20877 template <typename batch_rule_t, batch_rule_t batch_rule>
20878 at::Tensor & hardswish__generated_plumbing(at::Tensor & self) {
20879   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
20880   auto maybe_layer = maybeCurrentDynamicLayer();
20881   vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
20882   int64_t cur_level = maybe_layer->layerId();
20883   if (!isBatchedAtLevel(self, cur_level)) {
20884     return at::_ops::hardswish_::call(self);
20885   }
20886   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
20887   batch_rule(self_value, self_bdim);
20888   return self;
20889 }
20890 template <typename batch_rule_t, batch_rule_t batch_rule>
20891 at::Tensor hardswish_backward_generated_plumbing(const at::Tensor & grad_output, const at::Tensor & self) {
20892   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
20893   auto maybe_layer = maybeCurrentDynamicLayer();
20894   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
20895   int64_t cur_level = maybe_layer->layerId();
20896   if (!isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(self, cur_level)) {
20897     return at::_ops::hardswish_backward::call(grad_output, self);
20898   }
20899   auto [grad_output_value, grad_output_bdim] = unwrapTensorAtLevel(grad_output, cur_level);
20900   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
20901   auto results = batch_rule(grad_output_value, grad_output_bdim, self_value, self_bdim);
20902   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
20903 }
20904 template <typename batch_rule_t, batch_rule_t batch_rule>
20905 at::Tensor leaky_relu_generated_plumbing(const at::Tensor & self, const at::Scalar & negative_slope) {
20906   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
20907   auto maybe_layer = maybeCurrentDynamicLayer();
20908   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
20909   int64_t cur_level = maybe_layer->layerId();
20910   if (!isBatchedAtLevel(self, cur_level)) {
20911     return at::_ops::leaky_relu::call(self, negative_slope);
20912   }
20913   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
20914   auto results = batch_rule(self_value, self_bdim, negative_slope);
20915   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
20916 }
20917 template <typename batch_rule_t, batch_rule_t batch_rule>
20918 at::Tensor leaky_relu_backward_generated_plumbing(const at::Tensor & grad_output, const at::Tensor & self, const at::Scalar & negative_slope, bool self_is_result) {
20919   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
20920   auto maybe_layer = maybeCurrentDynamicLayer();
20921   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
20922   int64_t cur_level = maybe_layer->layerId();
20923   if (!isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(self, cur_level)) {
20924     return at::_ops::leaky_relu_backward::call(grad_output, self, negative_slope, self_is_result);
20925   }
20926   auto [grad_output_value, grad_output_bdim] = unwrapTensorAtLevel(grad_output, cur_level);
20927   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
20928   auto results = batch_rule(grad_output_value, grad_output_bdim, self_value, self_bdim, negative_slope, self_is_result);
20929   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
20930 }
20931 template <typename batch_rule_t, batch_rule_t batch_rule>
20932 at::Tensor & leaky_relu__generated_plumbing(at::Tensor & self, const at::Scalar & negative_slope) {
20933   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
20934   auto maybe_layer = maybeCurrentDynamicLayer();
20935   vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
20936   int64_t cur_level = maybe_layer->layerId();
20937   if (!isBatchedAtLevel(self, cur_level)) {
20938     return at::_ops::leaky_relu_::call(self, negative_slope);
20939   }
20940   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
20941   batch_rule(self_value, self_bdim, negative_slope);
20942   return self;
20943 }
20944 template <typename batch_rule_t, batch_rule_t batch_rule>
20945 at::Tensor log_sigmoid_generated_plumbing(const at::Tensor & self) {
20946   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
20947   auto maybe_layer = maybeCurrentDynamicLayer();
20948   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
20949   int64_t cur_level = maybe_layer->layerId();
20950   if (!isBatchedAtLevel(self, cur_level)) {
20951     return at::_ops::log_sigmoid::call(self);
20952   }
20953   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
20954   auto results = batch_rule(self_value, self_bdim);
20955   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
20956 }
20957 template <typename batch_rule_t, batch_rule_t batch_rule>
20958 ::std::tuple<at::Tensor,at::Tensor> log_sigmoid_forward_generated_plumbing(const at::Tensor & self) {
20959   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
20960   auto maybe_layer = maybeCurrentDynamicLayer();
20961   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
20962   int64_t cur_level = maybe_layer->layerId();
20963   if (!isBatchedAtLevel(self, cur_level)) {
20964     return at::_ops::log_sigmoid_forward::call(self);
20965   }
20966   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
20967   auto results = batch_rule(self_value, self_bdim);
20968   return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
20969 }
20970 template <typename batch_rule_t, batch_rule_t batch_rule>
20971 at::Tensor log_sigmoid_backward_generated_plumbing(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & buffer) {
20972   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
20973   auto maybe_layer = maybeCurrentDynamicLayer();
20974   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
20975   int64_t cur_level = maybe_layer->layerId();
20976   if (!isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(buffer, cur_level)) {
20977     return at::_ops::log_sigmoid_backward::call(grad_output, self, buffer);
20978   }
20979   auto [grad_output_value, grad_output_bdim] = unwrapTensorAtLevel(grad_output, cur_level);
20980   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
20981   auto [buffer_value, buffer_bdim] = unwrapTensorAtLevel(buffer, cur_level);
20982   auto results = batch_rule(grad_output_value, grad_output_bdim, self_value, self_bdim, buffer_value, buffer_bdim);
20983   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
20984 }
20985 template <typename batch_rule_t, batch_rule_t batch_rule>
20986 at::Tensor rrelu_with_noise_backward_generated_plumbing(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & noise, const at::Scalar & lower, const at::Scalar & upper, bool training, bool self_is_result) {
20987   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
20988   auto maybe_layer = maybeCurrentDynamicLayer();
20989   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
20990   int64_t cur_level = maybe_layer->layerId();
20991   if (!isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(noise, cur_level)) {
20992     return at::_ops::rrelu_with_noise_backward::call(grad_output, self, noise, lower, upper, training, self_is_result);
20993   }
20994   auto [grad_output_value, grad_output_bdim] = unwrapTensorAtLevel(grad_output, cur_level);
20995   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
20996   auto [noise_value, noise_bdim] = unwrapTensorAtLevel(noise, cur_level);
20997   auto results = batch_rule(grad_output_value, grad_output_bdim, self_value, self_bdim, noise_value, noise_bdim, lower, upper, training, self_is_result);
20998   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
20999 }
21000 template <typename batch_rule_t, batch_rule_t batch_rule>
21001 at::Tensor softplus_generated_plumbing(const at::Tensor & self, const at::Scalar & beta, const at::Scalar & threshold) {
21002   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
21003   auto maybe_layer = maybeCurrentDynamicLayer();
21004   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
21005   int64_t cur_level = maybe_layer->layerId();
21006   if (!isBatchedAtLevel(self, cur_level)) {
21007     return at::_ops::softplus::call(self, beta, threshold);
21008   }
21009   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
21010   auto results = batch_rule(self_value, self_bdim, beta, threshold);
21011   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
21012 }
21013 template <typename batch_rule_t, batch_rule_t batch_rule>
21014 at::Tensor softplus_backward_generated_plumbing(const at::Tensor & grad_output, const at::Tensor & self, const at::Scalar & beta, const at::Scalar & threshold) {
21015   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
21016   auto maybe_layer = maybeCurrentDynamicLayer();
21017   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
21018   int64_t cur_level = maybe_layer->layerId();
21019   if (!isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(self, cur_level)) {
21020     return at::_ops::softplus_backward::call(grad_output, self, beta, threshold);
21021   }
21022   auto [grad_output_value, grad_output_bdim] = unwrapTensorAtLevel(grad_output, cur_level);
21023   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
21024   auto results = batch_rule(grad_output_value, grad_output_bdim, self_value, self_bdim, beta, threshold);
21025   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
21026 }
21027 template <typename batch_rule_t, batch_rule_t batch_rule>
21028 at::Tensor softshrink_generated_plumbing(const at::Tensor & self, const at::Scalar & lambd) {
21029   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
21030   auto maybe_layer = maybeCurrentDynamicLayer();
21031   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
21032   int64_t cur_level = maybe_layer->layerId();
21033   if (!isBatchedAtLevel(self, cur_level)) {
21034     return at::_ops::softshrink::call(self, lambd);
21035   }
21036   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
21037   auto results = batch_rule(self_value, self_bdim, lambd);
21038   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
21039 }
21040 template <typename batch_rule_t, batch_rule_t batch_rule>
21041 at::Tensor softshrink_backward_generated_plumbing(const at::Tensor & grad_output, const at::Tensor & self, const at::Scalar & lambd) {
21042   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
21043   auto maybe_layer = maybeCurrentDynamicLayer();
21044   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
21045   int64_t cur_level = maybe_layer->layerId();
21046   if (!isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(self, cur_level)) {
21047     return at::_ops::softshrink_backward::call(grad_output, self, lambd);
21048   }
21049   auto [grad_output_value, grad_output_bdim] = unwrapTensorAtLevel(grad_output, cur_level);
21050   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
21051   auto results = batch_rule(grad_output_value, grad_output_bdim, self_value, self_bdim, lambd);
21052   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
21053 }
21054 template <typename batch_rule_t, batch_rule_t batch_rule>
21055 at::Tensor adaptive_avg_pool2d_generated_plumbing(const at::Tensor & self, c10::SymIntArrayRef output_size) {
21056   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
21057   auto maybe_layer = maybeCurrentDynamicLayer();
21058   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
21059   int64_t cur_level = maybe_layer->layerId();
21060   if (!isBatchedAtLevel(self, cur_level)) {
21061     return at::_ops::adaptive_avg_pool2d::call(self, output_size);
21062   }
21063   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
21064   auto results = batch_rule(self_value, self_bdim, output_size);
21065   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
21066 }
21067 template <typename batch_rule_t, batch_rule_t batch_rule>
21068 at::Tensor mkldnn_adaptive_avg_pool2d_generated_plumbing(const at::Tensor & self, at::IntArrayRef output_size) {
21069   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
21070   auto maybe_layer = maybeCurrentDynamicLayer();
21071   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
21072   int64_t cur_level = maybe_layer->layerId();
21073   if (!isBatchedAtLevel(self, cur_level)) {
21074     return at::_ops::mkldnn_adaptive_avg_pool2d::call(self, output_size);
21075   }
21076   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
21077   auto results = batch_rule(self_value, self_bdim, output_size);
21078   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
21079 }
21080 template <typename batch_rule_t, batch_rule_t batch_rule>
21081 at::Tensor mkldnn_adaptive_avg_pool2d_backward_generated_plumbing(const at::Tensor & grad_output, const at::Tensor & self) {
21082   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
21083   auto maybe_layer = maybeCurrentDynamicLayer();
21084   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
21085   int64_t cur_level = maybe_layer->layerId();
21086   if (!isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(self, cur_level)) {
21087     return at::_ops::mkldnn_adaptive_avg_pool2d_backward::call(grad_output, self);
21088   }
21089   auto [grad_output_value, grad_output_bdim] = unwrapTensorAtLevel(grad_output, cur_level);
21090   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
21091   auto results = batch_rule(grad_output_value, grad_output_bdim, self_value, self_bdim);
21092   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
21093 }
21094 template <typename batch_rule_t, batch_rule_t batch_rule>
21095 at::Tensor _adaptive_avg_pool2d_generated_plumbing(const at::Tensor & self, c10::SymIntArrayRef output_size) {
21096   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
21097   auto maybe_layer = maybeCurrentDynamicLayer();
21098   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
21099   int64_t cur_level = maybe_layer->layerId();
21100   if (!isBatchedAtLevel(self, cur_level)) {
21101     return at::_ops::_adaptive_avg_pool2d::call(self, output_size);
21102   }
21103   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
21104   auto results = batch_rule(self_value, self_bdim, output_size);
21105   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
21106 }
21107 template <typename batch_rule_t, batch_rule_t batch_rule>
21108 at::Tensor _adaptive_avg_pool2d_backward_generated_plumbing(const at::Tensor & grad_output, const at::Tensor & self) {
21109   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
21110   auto maybe_layer = maybeCurrentDynamicLayer();
21111   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
21112   int64_t cur_level = maybe_layer->layerId();
21113   if (!isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(self, cur_level)) {
21114     return at::_ops::_adaptive_avg_pool2d_backward::call(grad_output, self);
21115   }
21116   auto [grad_output_value, grad_output_bdim] = unwrapTensorAtLevel(grad_output, cur_level);
21117   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
21118   auto results = batch_rule(grad_output_value, grad_output_bdim, self_value, self_bdim);
21119   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
21120 }
21121 template <typename batch_rule_t, batch_rule_t batch_rule>
21122 at::Tensor adaptive_avg_pool3d_generated_plumbing(const at::Tensor & self, c10::SymIntArrayRef output_size) {
21123   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
21124   auto maybe_layer = maybeCurrentDynamicLayer();
21125   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
21126   int64_t cur_level = maybe_layer->layerId();
21127   if (!isBatchedAtLevel(self, cur_level)) {
21128     return at::_ops::adaptive_avg_pool3d::call(self, output_size);
21129   }
21130   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
21131   auto results = batch_rule(self_value, self_bdim, output_size);
21132   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
21133 }
21134 template <typename batch_rule_t, batch_rule_t batch_rule>
21135 at::Tensor _adaptive_avg_pool3d_generated_plumbing(const at::Tensor & self, c10::SymIntArrayRef output_size) {
21136   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
21137   auto maybe_layer = maybeCurrentDynamicLayer();
21138   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
21139   int64_t cur_level = maybe_layer->layerId();
21140   if (!isBatchedAtLevel(self, cur_level)) {
21141     return at::_ops::_adaptive_avg_pool3d::call(self, output_size);
21142   }
21143   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
21144   auto results = batch_rule(self_value, self_bdim, output_size);
21145   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
21146 }
21147 template <typename batch_rule_t, batch_rule_t batch_rule>
21148 at::Tensor _adaptive_avg_pool3d_backward_generated_plumbing(const at::Tensor & grad_output, const at::Tensor & self) {
21149   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
21150   auto maybe_layer = maybeCurrentDynamicLayer();
21151   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
21152   int64_t cur_level = maybe_layer->layerId();
21153   if (!isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(self, cur_level)) {
21154     return at::_ops::_adaptive_avg_pool3d_backward::call(grad_output, self);
21155   }
21156   auto [grad_output_value, grad_output_bdim] = unwrapTensorAtLevel(grad_output, cur_level);
21157   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
21158   auto results = batch_rule(grad_output_value, grad_output_bdim, self_value, self_bdim);
21159   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
21160 }
21161 template <typename batch_rule_t, batch_rule_t batch_rule>
21162 ::std::tuple<at::Tensor,at::Tensor> adaptive_max_pool2d_generated_plumbing(const at::Tensor & self, at::IntArrayRef output_size) {
21163   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
21164   auto maybe_layer = maybeCurrentDynamicLayer();
21165   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
21166   int64_t cur_level = maybe_layer->layerId();
21167   if (!isBatchedAtLevel(self, cur_level)) {
21168     return at::_ops::adaptive_max_pool2d::call(self, output_size);
21169   }
21170   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
21171   auto results = batch_rule(self_value, self_bdim, output_size);
21172   return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
21173 }
21174 template <typename batch_rule_t, batch_rule_t batch_rule>
21175 at::Tensor adaptive_max_pool2d_backward_generated_plumbing(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & indices) {
21176   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
21177   auto maybe_layer = maybeCurrentDynamicLayer();
21178   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
21179   int64_t cur_level = maybe_layer->layerId();
21180   if (!isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(indices, cur_level)) {
21181     return at::_ops::adaptive_max_pool2d_backward::call(grad_output, self, indices);
21182   }
21183   auto [grad_output_value, grad_output_bdim] = unwrapTensorAtLevel(grad_output, cur_level);
21184   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
21185   auto [indices_value, indices_bdim] = unwrapTensorAtLevel(indices, cur_level);
21186   auto results = batch_rule(grad_output_value, grad_output_bdim, self_value, self_bdim, indices_value, indices_bdim);
21187   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
21188 }
21189 template <typename batch_rule_t, batch_rule_t batch_rule>
21190 ::std::tuple<at::Tensor,at::Tensor> adaptive_max_pool3d_generated_plumbing(const at::Tensor & self, at::IntArrayRef output_size) {
21191   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
21192   auto maybe_layer = maybeCurrentDynamicLayer();
21193   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
21194   int64_t cur_level = maybe_layer->layerId();
21195   if (!isBatchedAtLevel(self, cur_level)) {
21196     return at::_ops::adaptive_max_pool3d::call(self, output_size);
21197   }
21198   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
21199   auto results = batch_rule(self_value, self_bdim, output_size);
21200   return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
21201 }
21202 template <typename batch_rule_t, batch_rule_t batch_rule>
21203 at::Tensor adaptive_max_pool3d_backward_generated_plumbing(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & indices) {
21204   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
21205   auto maybe_layer = maybeCurrentDynamicLayer();
21206   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
21207   int64_t cur_level = maybe_layer->layerId();
21208   if (!isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(indices, cur_level)) {
21209     return at::_ops::adaptive_max_pool3d_backward::call(grad_output, self, indices);
21210   }
21211   auto [grad_output_value, grad_output_bdim] = unwrapTensorAtLevel(grad_output, cur_level);
21212   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
21213   auto [indices_value, indices_bdim] = unwrapTensorAtLevel(indices, cur_level);
21214   auto results = batch_rule(grad_output_value, grad_output_bdim, self_value, self_bdim, indices_value, indices_bdim);
21215   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
21216 }
21217 template <typename batch_rule_t, batch_rule_t batch_rule>
21218 at::Tensor avg_pool2d_generated_plumbing(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, bool ceil_mode, bool count_include_pad, ::std::optional<int64_t> divisor_override) {
21219   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
21220   auto maybe_layer = maybeCurrentDynamicLayer();
21221   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
21222   int64_t cur_level = maybe_layer->layerId();
21223   if (!isBatchedAtLevel(self, cur_level)) {
21224     return at::_ops::avg_pool2d::call(self, kernel_size, stride, padding, ceil_mode, count_include_pad, divisor_override);
21225   }
21226   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
21227   auto results = batch_rule(self_value, self_bdim, kernel_size, stride, padding, ceil_mode, count_include_pad, divisor_override);
21228   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
21229 }
21230 template <typename batch_rule_t, batch_rule_t batch_rule>
21231 at::Tensor avg_pool2d_backward_generated_plumbing(const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, bool ceil_mode, bool count_include_pad, ::std::optional<int64_t> divisor_override) {
21232   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
21233   auto maybe_layer = maybeCurrentDynamicLayer();
21234   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
21235   int64_t cur_level = maybe_layer->layerId();
21236   if (!isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(self, cur_level)) {
21237     return at::_ops::avg_pool2d_backward::call(grad_output, self, kernel_size, stride, padding, ceil_mode, count_include_pad, divisor_override);
21238   }
21239   auto [grad_output_value, grad_output_bdim] = unwrapTensorAtLevel(grad_output, cur_level);
21240   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
21241   auto results = batch_rule(grad_output_value, grad_output_bdim, self_value, self_bdim, kernel_size, stride, padding, ceil_mode, count_include_pad, divisor_override);
21242   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
21243 }
21244 template <typename batch_rule_t, batch_rule_t batch_rule>
21245 at::Tensor avg_pool3d_generated_plumbing(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, bool ceil_mode, bool count_include_pad, ::std::optional<int64_t> divisor_override) {
21246   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
21247   auto maybe_layer = maybeCurrentDynamicLayer();
21248   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
21249   int64_t cur_level = maybe_layer->layerId();
21250   if (!isBatchedAtLevel(self, cur_level)) {
21251     return at::_ops::avg_pool3d::call(self, kernel_size, stride, padding, ceil_mode, count_include_pad, divisor_override);
21252   }
21253   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
21254   auto results = batch_rule(self_value, self_bdim, kernel_size, stride, padding, ceil_mode, count_include_pad, divisor_override);
21255   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
21256 }
21257 template <typename batch_rule_t, batch_rule_t batch_rule>
21258 at::Tensor avg_pool3d_backward_generated_plumbing(const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, bool ceil_mode, bool count_include_pad, ::std::optional<int64_t> divisor_override) {
21259   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
21260   auto maybe_layer = maybeCurrentDynamicLayer();
21261   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
21262   int64_t cur_level = maybe_layer->layerId();
21263   if (!isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(self, cur_level)) {
21264     return at::_ops::avg_pool3d_backward::call(grad_output, self, kernel_size, stride, padding, ceil_mode, count_include_pad, divisor_override);
21265   }
21266   auto [grad_output_value, grad_output_bdim] = unwrapTensorAtLevel(grad_output, cur_level);
21267   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
21268   auto results = batch_rule(grad_output_value, grad_output_bdim, self_value, self_bdim, kernel_size, stride, padding, ceil_mode, count_include_pad, divisor_override);
21269   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
21270 }
21271 template <typename batch_rule_t, batch_rule_t batch_rule>
21272 ::std::tuple<at::Tensor,at::Tensor> fractional_max_pool2d_generated_plumbing(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef output_size, const at::Tensor & random_samples) {
21273   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
21274   auto maybe_layer = maybeCurrentDynamicLayer();
21275   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
21276   int64_t cur_level = maybe_layer->layerId();
21277   if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(random_samples, cur_level)) {
21278     return at::_ops::fractional_max_pool2d::call(self, kernel_size, output_size, random_samples);
21279   }
21280   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
21281   auto [random_samples_value, random_samples_bdim] = unwrapTensorAtLevel(random_samples, cur_level);
21282   auto results = batch_rule(self_value, self_bdim, kernel_size, output_size, random_samples_value, random_samples_bdim);
21283   return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
21284 }
21285 template <typename batch_rule_t, batch_rule_t batch_rule>
21286 at::Tensor fractional_max_pool2d_backward_generated_plumbing(const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef output_size, const at::Tensor & indices) {
21287   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
21288   auto maybe_layer = maybeCurrentDynamicLayer();
21289   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
21290   int64_t cur_level = maybe_layer->layerId();
21291   if (!isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(indices, cur_level)) {
21292     return at::_ops::fractional_max_pool2d_backward::call(grad_output, self, kernel_size, output_size, indices);
21293   }
21294   auto [grad_output_value, grad_output_bdim] = unwrapTensorAtLevel(grad_output, cur_level);
21295   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
21296   auto [indices_value, indices_bdim] = unwrapTensorAtLevel(indices, cur_level);
21297   auto results = batch_rule(grad_output_value, grad_output_bdim, self_value, self_bdim, kernel_size, output_size, indices_value, indices_bdim);
21298   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
21299 }
21300 template <typename batch_rule_t, batch_rule_t batch_rule>
21301 ::std::tuple<at::Tensor,at::Tensor> fractional_max_pool3d_generated_plumbing(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef output_size, const at::Tensor & random_samples) {
21302   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
21303   auto maybe_layer = maybeCurrentDynamicLayer();
21304   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
21305   int64_t cur_level = maybe_layer->layerId();
21306   if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(random_samples, cur_level)) {
21307     return at::_ops::fractional_max_pool3d::call(self, kernel_size, output_size, random_samples);
21308   }
21309   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
21310   auto [random_samples_value, random_samples_bdim] = unwrapTensorAtLevel(random_samples, cur_level);
21311   auto results = batch_rule(self_value, self_bdim, kernel_size, output_size, random_samples_value, random_samples_bdim);
21312   return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
21313 }
21314 template <typename batch_rule_t, batch_rule_t batch_rule>
21315 at::Tensor fractional_max_pool3d_backward_generated_plumbing(const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef output_size, const at::Tensor & indices) {
21316   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
21317   auto maybe_layer = maybeCurrentDynamicLayer();
21318   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
21319   int64_t cur_level = maybe_layer->layerId();
21320   if (!isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(indices, cur_level)) {
21321     return at::_ops::fractional_max_pool3d_backward::call(grad_output, self, kernel_size, output_size, indices);
21322   }
21323   auto [grad_output_value, grad_output_bdim] = unwrapTensorAtLevel(grad_output, cur_level);
21324   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
21325   auto [indices_value, indices_bdim] = unwrapTensorAtLevel(indices, cur_level);
21326   auto results = batch_rule(grad_output_value, grad_output_bdim, self_value, self_bdim, kernel_size, output_size, indices_value, indices_bdim);
21327   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
21328 }
21329 template <typename batch_rule_t, batch_rule_t batch_rule>
21330 ::std::tuple<at::Tensor,at::Tensor> max_pool2d_with_indices_generated_plumbing(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode) {
21331   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
21332   auto maybe_layer = maybeCurrentDynamicLayer();
21333   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
21334   int64_t cur_level = maybe_layer->layerId();
21335   if (!isBatchedAtLevel(self, cur_level)) {
21336     return at::_ops::max_pool2d_with_indices::call(self, kernel_size, stride, padding, dilation, ceil_mode);
21337   }
21338   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
21339   auto results = batch_rule(self_value, self_bdim, kernel_size, stride, padding, dilation, ceil_mode);
21340   return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
21341 }
21342 template <typename batch_rule_t, batch_rule_t batch_rule>
21343 at::Tensor max_pool2d_with_indices_backward_generated_plumbing(const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode, const at::Tensor & indices) {
21344   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
21345   auto maybe_layer = maybeCurrentDynamicLayer();
21346   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
21347   int64_t cur_level = maybe_layer->layerId();
21348   if (!isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(indices, cur_level)) {
21349     return at::_ops::max_pool2d_with_indices_backward::call(grad_output, self, kernel_size, stride, padding, dilation, ceil_mode, indices);
21350   }
21351   auto [grad_output_value, grad_output_bdim] = unwrapTensorAtLevel(grad_output, cur_level);
21352   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
21353   auto [indices_value, indices_bdim] = unwrapTensorAtLevel(indices, cur_level);
21354   auto results = batch_rule(grad_output_value, grad_output_bdim, self_value, self_bdim, kernel_size, stride, padding, dilation, ceil_mode, indices_value, indices_bdim);
21355   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
21356 }
21357 template <typename batch_rule_t, batch_rule_t batch_rule>
21358 ::std::tuple<at::Tensor,at::Tensor> max_pool3d_with_indices_generated_plumbing(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode) {
21359   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
21360   auto maybe_layer = maybeCurrentDynamicLayer();
21361   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
21362   int64_t cur_level = maybe_layer->layerId();
21363   if (!isBatchedAtLevel(self, cur_level)) {
21364     return at::_ops::max_pool3d_with_indices::call(self, kernel_size, stride, padding, dilation, ceil_mode);
21365   }
21366   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
21367   auto results = batch_rule(self_value, self_bdim, kernel_size, stride, padding, dilation, ceil_mode);
21368   return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
21369 }
21370 template <typename batch_rule_t, batch_rule_t batch_rule>
21371 at::Tensor max_pool3d_with_indices_backward_generated_plumbing(const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode, const at::Tensor & indices) {
21372   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
21373   auto maybe_layer = maybeCurrentDynamicLayer();
21374   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
21375   int64_t cur_level = maybe_layer->layerId();
21376   if (!isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(indices, cur_level)) {
21377     return at::_ops::max_pool3d_with_indices_backward::call(grad_output, self, kernel_size, stride, padding, dilation, ceil_mode, indices);
21378   }
21379   auto [grad_output_value, grad_output_bdim] = unwrapTensorAtLevel(grad_output, cur_level);
21380   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
21381   auto [indices_value, indices_bdim] = unwrapTensorAtLevel(indices, cur_level);
21382   auto results = batch_rule(grad_output_value, grad_output_bdim, self_value, self_bdim, kernel_size, stride, padding, dilation, ceil_mode, indices_value, indices_bdim);
21383   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
21384 }
21385 template <typename batch_rule_t, batch_rule_t batch_rule>
21386 at::Tensor max_unpool2d_generated_plumbing(const at::Tensor & self, const at::Tensor & indices, c10::SymIntArrayRef output_size) {
21387   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
21388   auto maybe_layer = maybeCurrentDynamicLayer();
21389   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
21390   int64_t cur_level = maybe_layer->layerId();
21391   if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(indices, cur_level)) {
21392     return at::_ops::max_unpool2d::call(self, indices, output_size);
21393   }
21394   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
21395   auto [indices_value, indices_bdim] = unwrapTensorAtLevel(indices, cur_level);
21396   auto results = batch_rule(self_value, self_bdim, indices_value, indices_bdim, output_size);
21397   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
21398 }
21399 template <typename batch_rule_t, batch_rule_t batch_rule>
21400 at::Tensor max_unpool3d_generated_plumbing(const at::Tensor & self, const at::Tensor & indices, c10::SymIntArrayRef output_size, at::IntArrayRef stride, at::IntArrayRef padding) {
21401   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
21402   auto maybe_layer = maybeCurrentDynamicLayer();
21403   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
21404   int64_t cur_level = maybe_layer->layerId();
21405   if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(indices, cur_level)) {
21406     return at::_ops::max_unpool3d::call(self, indices, output_size, stride, padding);
21407   }
21408   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
21409   auto [indices_value, indices_bdim] = unwrapTensorAtLevel(indices, cur_level);
21410   auto results = batch_rule(self_value, self_bdim, indices_value, indices_bdim, output_size, stride, padding);
21411   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
21412 }
21413 template <typename batch_rule_t, batch_rule_t batch_rule>
21414 at::Tensor reflection_pad1d_generated_plumbing(const at::Tensor & self, c10::SymIntArrayRef padding) {
21415   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
21416   auto maybe_layer = maybeCurrentDynamicLayer();
21417   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
21418   int64_t cur_level = maybe_layer->layerId();
21419   if (!isBatchedAtLevel(self, cur_level)) {
21420     return at::_ops::reflection_pad1d::call(self, padding);
21421   }
21422   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
21423   auto results = batch_rule(self_value, self_bdim, padding);
21424   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
21425 }
21426 template <typename batch_rule_t, batch_rule_t batch_rule>
21427 at::Tensor reflection_pad1d_backward_generated_plumbing(const at::Tensor & grad_output, const at::Tensor & self, c10::SymIntArrayRef padding) {
21428   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
21429   auto maybe_layer = maybeCurrentDynamicLayer();
21430   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
21431   int64_t cur_level = maybe_layer->layerId();
21432   if (!isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(self, cur_level)) {
21433     return at::_ops::reflection_pad1d_backward::call(grad_output, self, padding);
21434   }
21435   auto [grad_output_value, grad_output_bdim] = unwrapTensorAtLevel(grad_output, cur_level);
21436   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
21437   auto results = batch_rule(grad_output_value, grad_output_bdim, self_value, self_bdim, padding);
21438   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
21439 }
21440 template <typename batch_rule_t, batch_rule_t batch_rule>
21441 at::Tensor reflection_pad2d_generated_plumbing(const at::Tensor & self, c10::SymIntArrayRef padding) {
21442   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
21443   auto maybe_layer = maybeCurrentDynamicLayer();
21444   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
21445   int64_t cur_level = maybe_layer->layerId();
21446   if (!isBatchedAtLevel(self, cur_level)) {
21447     return at::_ops::reflection_pad2d::call(self, padding);
21448   }
21449   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
21450   auto results = batch_rule(self_value, self_bdim, padding);
21451   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
21452 }
21453 template <typename batch_rule_t, batch_rule_t batch_rule>
21454 at::Tensor reflection_pad2d_backward_generated_plumbing(const at::Tensor & grad_output, const at::Tensor & self, c10::SymIntArrayRef padding) {
21455   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
21456   auto maybe_layer = maybeCurrentDynamicLayer();
21457   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
21458   int64_t cur_level = maybe_layer->layerId();
21459   if (!isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(self, cur_level)) {
21460     return at::_ops::reflection_pad2d_backward::call(grad_output, self, padding);
21461   }
21462   auto [grad_output_value, grad_output_bdim] = unwrapTensorAtLevel(grad_output, cur_level);
21463   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
21464   auto results = batch_rule(grad_output_value, grad_output_bdim, self_value, self_bdim, padding);
21465   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
21466 }
21467 template <typename batch_rule_t, batch_rule_t batch_rule>
21468 at::Tensor reflection_pad3d_generated_plumbing(const at::Tensor & self, c10::SymIntArrayRef padding) {
21469   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
21470   auto maybe_layer = maybeCurrentDynamicLayer();
21471   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
21472   int64_t cur_level = maybe_layer->layerId();
21473   if (!isBatchedAtLevel(self, cur_level)) {
21474     return at::_ops::reflection_pad3d::call(self, padding);
21475   }
21476   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
21477   auto results = batch_rule(self_value, self_bdim, padding);
21478   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
21479 }
21480 template <typename batch_rule_t, batch_rule_t batch_rule>
21481 at::Tensor reflection_pad3d_backward_generated_plumbing(const at::Tensor & grad_output, const at::Tensor & self, c10::SymIntArrayRef padding) {
21482   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
21483   auto maybe_layer = maybeCurrentDynamicLayer();
21484   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
21485   int64_t cur_level = maybe_layer->layerId();
21486   if (!isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(self, cur_level)) {
21487     return at::_ops::reflection_pad3d_backward::call(grad_output, self, padding);
21488   }
21489   auto [grad_output_value, grad_output_bdim] = unwrapTensorAtLevel(grad_output, cur_level);
21490   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
21491   auto results = batch_rule(grad_output_value, grad_output_bdim, self_value, self_bdim, padding);
21492   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
21493 }
21494 template <typename batch_rule_t, batch_rule_t batch_rule>
21495 at::Tensor replication_pad1d_generated_plumbing(const at::Tensor & self, c10::SymIntArrayRef padding) {
21496   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
21497   auto maybe_layer = maybeCurrentDynamicLayer();
21498   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
21499   int64_t cur_level = maybe_layer->layerId();
21500   if (!isBatchedAtLevel(self, cur_level)) {
21501     return at::_ops::replication_pad1d::call(self, padding);
21502   }
21503   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
21504   auto results = batch_rule(self_value, self_bdim, padding);
21505   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
21506 }
21507 template <typename batch_rule_t, batch_rule_t batch_rule>
21508 at::Tensor replication_pad1d_backward_generated_plumbing(const at::Tensor & grad_output, const at::Tensor & self, c10::SymIntArrayRef padding) {
21509   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
21510   auto maybe_layer = maybeCurrentDynamicLayer();
21511   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
21512   int64_t cur_level = maybe_layer->layerId();
21513   if (!isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(self, cur_level)) {
21514     return at::_ops::replication_pad1d_backward::call(grad_output, self, padding);
21515   }
21516   auto [grad_output_value, grad_output_bdim] = unwrapTensorAtLevel(grad_output, cur_level);
21517   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
21518   auto results = batch_rule(grad_output_value, grad_output_bdim, self_value, self_bdim, padding);
21519   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
21520 }
21521 template <typename batch_rule_t, batch_rule_t batch_rule>
21522 at::Tensor replication_pad2d_generated_plumbing(const at::Tensor & self, c10::SymIntArrayRef padding) {
21523   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
21524   auto maybe_layer = maybeCurrentDynamicLayer();
21525   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
21526   int64_t cur_level = maybe_layer->layerId();
21527   if (!isBatchedAtLevel(self, cur_level)) {
21528     return at::_ops::replication_pad2d::call(self, padding);
21529   }
21530   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
21531   auto results = batch_rule(self_value, self_bdim, padding);
21532   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
21533 }
// Generated vmap plumbing for aten::replication_pad2d_backward: with
// FuncTorchBatched excluded from dispatch, unwrap inputs batched at the
// current vmap level, apply the batch rule, and re-wrap its (output, bdim)
// result; if nothing is batched at this level, fall through to the plain op.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor replication_pad2d_backward_generated_plumbing(const at::Tensor & grad_output, const at::Tensor & self, c10::SymIntArrayRef padding) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(self, cur_level)) {
    return at::_ops::replication_pad2d_backward::call(grad_output, self, padding);
  }
  auto [grad_output_value, grad_output_bdim] = unwrapTensorAtLevel(grad_output, cur_level);
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(grad_output_value, grad_output_bdim, self_value, self_bdim, padding);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// Generated vmap plumbing for aten::replication_pad3d: with FuncTorchBatched
// excluded from dispatch, unwrap inputs batched at the current vmap level,
// apply the batch rule, and re-wrap its (output, bdim) result; if nothing is
// batched at this level, fall through to the plain op.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor replication_pad3d_generated_plumbing(const at::Tensor & self, c10::SymIntArrayRef padding) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::replication_pad3d::call(self, padding);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, padding);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// Generated vmap plumbing for aten::replication_pad3d_backward: with
// FuncTorchBatched excluded from dispatch, unwrap inputs batched at the
// current vmap level, apply the batch rule, and re-wrap its (output, bdim)
// result; if nothing is batched at this level, fall through to the plain op.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor replication_pad3d_backward_generated_plumbing(const at::Tensor & grad_output, const at::Tensor & self, c10::SymIntArrayRef padding) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(self, cur_level)) {
    return at::_ops::replication_pad3d_backward::call(grad_output, self, padding);
  }
  auto [grad_output_value, grad_output_bdim] = unwrapTensorAtLevel(grad_output, cur_level);
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(grad_output_value, grad_output_bdim, self_value, self_bdim, padding);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// Generated vmap plumbing for aten::_pad_circular: with FuncTorchBatched
// excluded from dispatch, unwrap inputs batched at the current vmap level,
// apply the batch rule, and re-wrap its (output, bdim) result; if nothing is
// batched at this level, fall through to the plain op.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor _pad_circular_generated_plumbing(const at::Tensor & self, c10::SymIntArrayRef pad) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::_pad_circular::call(self, pad);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, pad);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// Generated vmap plumbing for aten::_pad_enum: with FuncTorchBatched
// excluded from dispatch, unwrap inputs batched at the current vmap level,
// apply the batch rule, and re-wrap its (output, bdim) result; if nothing is
// batched at this level, fall through to the plain op.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor _pad_enum_generated_plumbing(const at::Tensor & self, c10::SymIntArrayRef pad, int64_t mode, ::std::optional<double> value) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::_pad_enum::call(self, pad, mode, value);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, pad, mode, value);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// Generated vmap plumbing for aten::pad: with FuncTorchBatched excluded from
// dispatch, unwrap inputs batched at the current vmap level, apply the batch
// rule, and re-wrap its (output, bdim) result; if nothing is batched at this
// level, fall through to the plain op.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor pad_generated_plumbing(const at::Tensor & self, c10::SymIntArrayRef pad, c10::string_view mode, ::std::optional<double> value) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::pad::call(self, pad, mode, value);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, pad, mode, value);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// Generated vmap plumbing for aten::upsample_linear1d.vec: with
// FuncTorchBatched excluded from dispatch, unwrap inputs batched at the
// current vmap level, apply the batch rule, and re-wrap its (output, bdim)
// result; if nothing is batched at this level, fall through to the plain op.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor upsample_linear1d_vec_generated_plumbing(const at::Tensor & input, at::OptionalSymIntArrayRef output_size, bool align_corners, ::std::optional<at::ArrayRef<double>> scale_factors) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(input, cur_level)) {
    return at::_ops::upsample_linear1d_vec::call(input, output_size, align_corners, scale_factors);
  }
  auto [input_value, input_bdim] = unwrapTensorAtLevel(input, cur_level);
  auto results = batch_rule(input_value, input_bdim, output_size, align_corners, scale_factors);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// Generated vmap plumbing for aten::upsample_bilinear2d.vec: with
// FuncTorchBatched excluded from dispatch, unwrap inputs batched at the
// current vmap level, apply the batch rule, and re-wrap its (output, bdim)
// result; if nothing is batched at this level, fall through to the plain op.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor upsample_bilinear2d_vec_generated_plumbing(const at::Tensor & input, at::OptionalSymIntArrayRef output_size, bool align_corners, ::std::optional<at::ArrayRef<double>> scale_factors) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(input, cur_level)) {
    return at::_ops::upsample_bilinear2d_vec::call(input, output_size, align_corners, scale_factors);
  }
  auto [input_value, input_bdim] = unwrapTensorAtLevel(input, cur_level);
  auto results = batch_rule(input_value, input_bdim, output_size, align_corners, scale_factors);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// Generated vmap plumbing for aten::_upsample_bilinear2d_aa.vec: with
// FuncTorchBatched excluded from dispatch, unwrap inputs batched at the
// current vmap level, apply the batch rule, and re-wrap its (output, bdim)
// result; if nothing is batched at this level, fall through to the plain op.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor _upsample_bilinear2d_aa_vec_generated_plumbing(const at::Tensor & input, at::OptionalSymIntArrayRef output_size, bool align_corners, ::std::optional<at::ArrayRef<double>> scale_factors) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(input, cur_level)) {
    return at::_ops::_upsample_bilinear2d_aa_vec::call(input, output_size, align_corners, scale_factors);
  }
  auto [input_value, input_bdim] = unwrapTensorAtLevel(input, cur_level);
  auto results = batch_rule(input_value, input_bdim, output_size, align_corners, scale_factors);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// Generated vmap plumbing for aten::upsample_trilinear3d.vec: with
// FuncTorchBatched excluded from dispatch, unwrap inputs batched at the
// current vmap level, apply the batch rule, and re-wrap its (output, bdim)
// result; if nothing is batched at this level, fall through to the plain op.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor upsample_trilinear3d_vec_generated_plumbing(const at::Tensor & input, at::OptionalSymIntArrayRef output_size, bool align_corners, ::std::optional<at::ArrayRef<double>> scale_factors) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(input, cur_level)) {
    return at::_ops::upsample_trilinear3d_vec::call(input, output_size, align_corners, scale_factors);
  }
  auto [input_value, input_bdim] = unwrapTensorAtLevel(input, cur_level);
  auto results = batch_rule(input_value, input_bdim, output_size, align_corners, scale_factors);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// Generated vmap plumbing for aten::upsample_bicubic2d.vec: with
// FuncTorchBatched excluded from dispatch, unwrap inputs batched at the
// current vmap level, apply the batch rule, and re-wrap its (output, bdim)
// result; if nothing is batched at this level, fall through to the plain op.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor upsample_bicubic2d_vec_generated_plumbing(const at::Tensor & input, at::OptionalSymIntArrayRef output_size, bool align_corners, ::std::optional<at::ArrayRef<double>> scale_factors) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(input, cur_level)) {
    return at::_ops::upsample_bicubic2d_vec::call(input, output_size, align_corners, scale_factors);
  }
  auto [input_value, input_bdim] = unwrapTensorAtLevel(input, cur_level);
  auto results = batch_rule(input_value, input_bdim, output_size, align_corners, scale_factors);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// Generated vmap plumbing for aten::_upsample_bicubic2d_aa.vec: with
// FuncTorchBatched excluded from dispatch, unwrap inputs batched at the
// current vmap level, apply the batch rule, and re-wrap its (output, bdim)
// result; if nothing is batched at this level, fall through to the plain op.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor _upsample_bicubic2d_aa_vec_generated_plumbing(const at::Tensor & input, at::OptionalSymIntArrayRef output_size, bool align_corners, ::std::optional<at::ArrayRef<double>> scale_factors) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(input, cur_level)) {
    return at::_ops::_upsample_bicubic2d_aa_vec::call(input, output_size, align_corners, scale_factors);
  }
  auto [input_value, input_bdim] = unwrapTensorAtLevel(input, cur_level);
  auto results = batch_rule(input_value, input_bdim, output_size, align_corners, scale_factors);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// Generated vmap plumbing for aten::upsample_nearest1d.vec: with
// FuncTorchBatched excluded from dispatch, unwrap inputs batched at the
// current vmap level, apply the batch rule, and re-wrap its (output, bdim)
// result; if nothing is batched at this level, fall through to the plain op.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor upsample_nearest1d_vec_generated_plumbing(const at::Tensor & input, at::OptionalSymIntArrayRef output_size, ::std::optional<at::ArrayRef<double>> scale_factors) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(input, cur_level)) {
    return at::_ops::upsample_nearest1d_vec::call(input, output_size, scale_factors);
  }
  auto [input_value, input_bdim] = unwrapTensorAtLevel(input, cur_level);
  auto results = batch_rule(input_value, input_bdim, output_size, scale_factors);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// Generated vmap plumbing for aten::_upsample_nearest_exact1d.vec: with
// FuncTorchBatched excluded from dispatch, unwrap inputs batched at the
// current vmap level, apply the batch rule, and re-wrap its (output, bdim)
// result; if nothing is batched at this level, fall through to the plain op.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor _upsample_nearest_exact1d_vec_generated_plumbing(const at::Tensor & input, at::OptionalSymIntArrayRef output_size, ::std::optional<at::ArrayRef<double>> scale_factors) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(input, cur_level)) {
    return at::_ops::_upsample_nearest_exact1d_vec::call(input, output_size, scale_factors);
  }
  auto [input_value, input_bdim] = unwrapTensorAtLevel(input, cur_level);
  auto results = batch_rule(input_value, input_bdim, output_size, scale_factors);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// Generated vmap plumbing for aten::upsample_nearest2d.vec: with
// FuncTorchBatched excluded from dispatch, unwrap inputs batched at the
// current vmap level, apply the batch rule, and re-wrap its (output, bdim)
// result; if nothing is batched at this level, fall through to the plain op.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor upsample_nearest2d_vec_generated_plumbing(const at::Tensor & input, at::OptionalSymIntArrayRef output_size, ::std::optional<at::ArrayRef<double>> scale_factors) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(input, cur_level)) {
    return at::_ops::upsample_nearest2d_vec::call(input, output_size, scale_factors);
  }
  auto [input_value, input_bdim] = unwrapTensorAtLevel(input, cur_level);
  auto results = batch_rule(input_value, input_bdim, output_size, scale_factors);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// Generated vmap plumbing for aten::_upsample_nearest_exact2d.vec: with
// FuncTorchBatched excluded from dispatch, unwrap inputs batched at the
// current vmap level, apply the batch rule, and re-wrap its (output, bdim)
// result; if nothing is batched at this level, fall through to the plain op.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor _upsample_nearest_exact2d_vec_generated_plumbing(const at::Tensor & input, at::OptionalSymIntArrayRef output_size, ::std::optional<at::ArrayRef<double>> scale_factors) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(input, cur_level)) {
    return at::_ops::_upsample_nearest_exact2d_vec::call(input, output_size, scale_factors);
  }
  auto [input_value, input_bdim] = unwrapTensorAtLevel(input, cur_level);
  auto results = batch_rule(input_value, input_bdim, output_size, scale_factors);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// Generated vmap plumbing for aten::upsample_nearest3d.vec: with
// FuncTorchBatched excluded from dispatch, unwrap inputs batched at the
// current vmap level, apply the batch rule, and re-wrap its (output, bdim)
// result; if nothing is batched at this level, fall through to the plain op.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor upsample_nearest3d_vec_generated_plumbing(const at::Tensor & input, at::OptionalSymIntArrayRef output_size, ::std::optional<at::ArrayRef<double>> scale_factors) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(input, cur_level)) {
    return at::_ops::upsample_nearest3d_vec::call(input, output_size, scale_factors);
  }
  auto [input_value, input_bdim] = unwrapTensorAtLevel(input, cur_level);
  auto results = batch_rule(input_value, input_bdim, output_size, scale_factors);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// Generated vmap plumbing for aten::_upsample_nearest_exact3d.vec: with
// FuncTorchBatched excluded from dispatch, unwrap inputs batched at the
// current vmap level, apply the batch rule, and re-wrap its (output, bdim)
// result; if nothing is batched at this level, fall through to the plain op.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor _upsample_nearest_exact3d_vec_generated_plumbing(const at::Tensor & input, at::OptionalSymIntArrayRef output_size, ::std::optional<at::ArrayRef<double>> scale_factors) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(input, cur_level)) {
    return at::_ops::_upsample_nearest_exact3d_vec::call(input, output_size, scale_factors);
  }
  auto [input_value, input_bdim] = unwrapTensorAtLevel(input, cur_level);
  auto results = batch_rule(input_value, input_bdim, output_size, scale_factors);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// Generated vmap plumbing for aten::upsample_linear1d: with FuncTorchBatched
// excluded from dispatch, unwrap inputs batched at the current vmap level,
// apply the batch rule, and re-wrap its (output, bdim) result; if nothing is
// batched at this level, fall through to the plain op.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor upsample_linear1d_generated_plumbing(const at::Tensor & self, c10::SymIntArrayRef output_size, bool align_corners, ::std::optional<double> scales) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::upsample_linear1d::call(self, output_size, align_corners, scales);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, output_size, align_corners, scales);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// Generated vmap plumbing for aten::upsample_linear1d_backward: with
// FuncTorchBatched excluded from dispatch, unwrap inputs batched at the
// current vmap level, apply the batch rule, and re-wrap its (output, bdim)
// result; if nothing is batched at this level, fall through to the plain op.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor upsample_linear1d_backward_generated_plumbing(const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, bool align_corners, ::std::optional<double> scales) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(grad_output, cur_level)) {
    return at::_ops::upsample_linear1d_backward::call(grad_output, output_size, input_size, align_corners, scales);
  }
  auto [grad_output_value, grad_output_bdim] = unwrapTensorAtLevel(grad_output, cur_level);
  auto results = batch_rule(grad_output_value, grad_output_bdim, output_size, input_size, align_corners, scales);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// Generated vmap plumbing for aten::upsample_bilinear2d: with
// FuncTorchBatched excluded from dispatch, unwrap inputs batched at the
// current vmap level, apply the batch rule, and re-wrap its (output, bdim)
// result; if nothing is batched at this level, fall through to the plain op.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor upsample_bilinear2d_generated_plumbing(const at::Tensor & self, c10::SymIntArrayRef output_size, bool align_corners, ::std::optional<double> scales_h, ::std::optional<double> scales_w) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::upsample_bilinear2d::call(self, output_size, align_corners, scales_h, scales_w);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, output_size, align_corners, scales_h, scales_w);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// Generated vmap plumbing for aten::upsample_bilinear2d_backward: with
// FuncTorchBatched excluded from dispatch, unwrap inputs batched at the
// current vmap level, apply the batch rule, and re-wrap its (output, bdim)
// result; if nothing is batched at this level, fall through to the plain op.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor upsample_bilinear2d_backward_generated_plumbing(const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, bool align_corners, ::std::optional<double> scales_h, ::std::optional<double> scales_w) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(grad_output, cur_level)) {
    return at::_ops::upsample_bilinear2d_backward::call(grad_output, output_size, input_size, align_corners, scales_h, scales_w);
  }
  auto [grad_output_value, grad_output_bdim] = unwrapTensorAtLevel(grad_output, cur_level);
  auto results = batch_rule(grad_output_value, grad_output_bdim, output_size, input_size, align_corners, scales_h, scales_w);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// Generated vmap plumbing for aten::_upsample_bilinear2d_aa: with
// FuncTorchBatched excluded from dispatch, unwrap inputs batched at the
// current vmap level, apply the batch rule, and re-wrap its (output, bdim)
// result; if nothing is batched at this level, fall through to the plain op.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor _upsample_bilinear2d_aa_generated_plumbing(const at::Tensor & self, c10::SymIntArrayRef output_size, bool align_corners, ::std::optional<double> scales_h, ::std::optional<double> scales_w) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::_upsample_bilinear2d_aa::call(self, output_size, align_corners, scales_h, scales_w);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, output_size, align_corners, scales_h, scales_w);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// Generated vmap plumbing for aten::_upsample_bilinear2d_aa_backward: with
// FuncTorchBatched excluded from dispatch, unwrap inputs batched at the
// current vmap level, apply the batch rule, and re-wrap its (output, bdim)
// result; if nothing is batched at this level, fall through to the plain op.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor _upsample_bilinear2d_aa_backward_generated_plumbing(const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, bool align_corners, ::std::optional<double> scales_h, ::std::optional<double> scales_w) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(grad_output, cur_level)) {
    return at::_ops::_upsample_bilinear2d_aa_backward::call(grad_output, output_size, input_size, align_corners, scales_h, scales_w);
  }
  auto [grad_output_value, grad_output_bdim] = unwrapTensorAtLevel(grad_output, cur_level);
  auto results = batch_rule(grad_output_value, grad_output_bdim, output_size, input_size, align_corners, scales_h, scales_w);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// Generated vmap plumbing for aten::upsample_bicubic2d: with FuncTorchBatched
// excluded from dispatch, unwrap inputs batched at the current vmap level,
// apply the batch rule, and re-wrap its (output, bdim) result; if nothing is
// batched at this level, fall through to the plain op.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor upsample_bicubic2d_generated_plumbing(const at::Tensor & self, c10::SymIntArrayRef output_size, bool align_corners, ::std::optional<double> scales_h, ::std::optional<double> scales_w) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::upsample_bicubic2d::call(self, output_size, align_corners, scales_h, scales_w);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, output_size, align_corners, scales_h, scales_w);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// Generated vmap plumbing for aten::upsample_bicubic2d_backward: with
// FuncTorchBatched excluded from dispatch, unwrap inputs batched at the
// current vmap level, apply the batch rule, and re-wrap its (output, bdim)
// result; if nothing is batched at this level, fall through to the plain op.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor upsample_bicubic2d_backward_generated_plumbing(const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, bool align_corners, ::std::optional<double> scales_h, ::std::optional<double> scales_w) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(grad_output, cur_level)) {
    return at::_ops::upsample_bicubic2d_backward::call(grad_output, output_size, input_size, align_corners, scales_h, scales_w);
  }
  auto [grad_output_value, grad_output_bdim] = unwrapTensorAtLevel(grad_output, cur_level);
  auto results = batch_rule(grad_output_value, grad_output_bdim, output_size, input_size, align_corners, scales_h, scales_w);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// Generated vmap plumbing for aten::_upsample_bicubic2d_aa: with
// FuncTorchBatched excluded from dispatch, unwrap inputs batched at the
// current vmap level, apply the batch rule, and re-wrap its (output, bdim)
// result; if nothing is batched at this level, fall through to the plain op.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor _upsample_bicubic2d_aa_generated_plumbing(const at::Tensor & self, c10::SymIntArrayRef output_size, bool align_corners, ::std::optional<double> scales_h, ::std::optional<double> scales_w) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::_upsample_bicubic2d_aa::call(self, output_size, align_corners, scales_h, scales_w);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, output_size, align_corners, scales_h, scales_w);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// Generated vmap plumbing for aten::_upsample_bicubic2d_aa_backward: with
// FuncTorchBatched excluded from dispatch, unwrap inputs batched at the
// current vmap level, apply the batch rule, and re-wrap its (output, bdim)
// result; if nothing is batched at this level, fall through to the plain op.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor _upsample_bicubic2d_aa_backward_generated_plumbing(const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, bool align_corners, ::std::optional<double> scales_h, ::std::optional<double> scales_w) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(grad_output, cur_level)) {
    return at::_ops::_upsample_bicubic2d_aa_backward::call(grad_output, output_size, input_size, align_corners, scales_h, scales_w);
  }
  auto [grad_output_value, grad_output_bdim] = unwrapTensorAtLevel(grad_output, cur_level);
  auto results = batch_rule(grad_output_value, grad_output_bdim, output_size, input_size, align_corners, scales_h, scales_w);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// Generated vmap plumbing for aten::upsample_trilinear3d: with
// FuncTorchBatched excluded from dispatch, unwrap inputs batched at the
// current vmap level, apply the batch rule, and re-wrap its (output, bdim)
// result; if nothing is batched at this level, fall through to the plain op.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor upsample_trilinear3d_generated_plumbing(const at::Tensor & self, c10::SymIntArrayRef output_size, bool align_corners, ::std::optional<double> scales_d, ::std::optional<double> scales_h, ::std::optional<double> scales_w) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::upsample_trilinear3d::call(self, output_size, align_corners, scales_d, scales_h, scales_w);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, output_size, align_corners, scales_d, scales_h, scales_w);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// Generated vmap plumbing for aten::upsample_trilinear3d_backward: with
// FuncTorchBatched excluded from dispatch, unwrap inputs batched at the
// current vmap level, apply the batch rule, and re-wrap its (output, bdim)
// result; if nothing is batched at this level, fall through to the plain op.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor upsample_trilinear3d_backward_generated_plumbing(const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, bool align_corners, ::std::optional<double> scales_d, ::std::optional<double> scales_h, ::std::optional<double> scales_w) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(grad_output, cur_level)) {
    return at::_ops::upsample_trilinear3d_backward::call(grad_output, output_size, input_size, align_corners, scales_d, scales_h, scales_w);
  }
  auto [grad_output_value, grad_output_bdim] = unwrapTensorAtLevel(grad_output, cur_level);
  auto results = batch_rule(grad_output_value, grad_output_bdim, output_size, input_size, align_corners, scales_d, scales_h, scales_w);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// Generated vmap plumbing for aten::upsample_nearest1d: with FuncTorchBatched
// excluded from dispatch, unwrap inputs batched at the current vmap level,
// apply the batch rule, and re-wrap its (output, bdim) result; if nothing is
// batched at this level, fall through to the plain op.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor upsample_nearest1d_generated_plumbing(const at::Tensor & self, c10::SymIntArrayRef output_size, ::std::optional<double> scales) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::upsample_nearest1d::call(self, output_size, scales);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, output_size, scales);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// Generated vmap plumbing for aten::_upsample_nearest_exact1d: with
// FuncTorchBatched excluded from dispatch, unwrap inputs batched at the
// current vmap level, apply the batch rule, and re-wrap its (output, bdim)
// result; if nothing is batched at this level, fall through to the plain op.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor _upsample_nearest_exact1d_generated_plumbing(const at::Tensor & self, c10::SymIntArrayRef output_size, ::std::optional<double> scales) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::_upsample_nearest_exact1d::call(self, output_size, scales);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, output_size, scales);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// Generated vmap plumbing for aten::upsample_nearest1d_backward: with
// FuncTorchBatched excluded from dispatch, unwrap inputs batched at the
// current vmap level, apply the batch rule, and re-wrap its (output, bdim)
// result; if nothing is batched at this level, fall through to the plain op.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor upsample_nearest1d_backward_generated_plumbing(const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, ::std::optional<double> scales) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(grad_output, cur_level)) {
    return at::_ops::upsample_nearest1d_backward::call(grad_output, output_size, input_size, scales);
  }
  auto [grad_output_value, grad_output_bdim] = unwrapTensorAtLevel(grad_output, cur_level);
  auto results = batch_rule(grad_output_value, grad_output_bdim, output_size, input_size, scales);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// Generated vmap plumbing for aten::_upsample_nearest_exact1d_backward: with
// FuncTorchBatched excluded from dispatch, unwrap inputs batched at the
// current vmap level, apply the batch rule, and re-wrap its (output, bdim)
// result; if nothing is batched at this level, fall through to the plain op.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor _upsample_nearest_exact1d_backward_generated_plumbing(const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, ::std::optional<double> scales) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(grad_output, cur_level)) {
    return at::_ops::_upsample_nearest_exact1d_backward::call(grad_output, output_size, input_size, scales);
  }
  auto [grad_output_value, grad_output_bdim] = unwrapTensorAtLevel(grad_output, cur_level);
  auto results = batch_rule(grad_output_value, grad_output_bdim, output_size, input_size, scales);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
21978 template <typename batch_rule_t, batch_rule_t batch_rule>
21979 at::Tensor upsample_nearest2d_generated_plumbing(const at::Tensor & self, c10::SymIntArrayRef output_size, ::std::optional<double> scales_h, ::std::optional<double> scales_w) {
21980   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
21981   auto maybe_layer = maybeCurrentDynamicLayer();
21982   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
21983   int64_t cur_level = maybe_layer->layerId();
21984   if (!isBatchedAtLevel(self, cur_level)) {
21985     return at::_ops::upsample_nearest2d::call(self, output_size, scales_h, scales_w);
21986   }
21987   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
21988   auto results = batch_rule(self_value, self_bdim, output_size, scales_h, scales_w);
21989   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
21990 }
21991 template <typename batch_rule_t, batch_rule_t batch_rule>
21992 at::Tensor _upsample_nearest_exact2d_generated_plumbing(const at::Tensor & self, c10::SymIntArrayRef output_size, ::std::optional<double> scales_h, ::std::optional<double> scales_w) {
21993   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
21994   auto maybe_layer = maybeCurrentDynamicLayer();
21995   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
21996   int64_t cur_level = maybe_layer->layerId();
21997   if (!isBatchedAtLevel(self, cur_level)) {
21998     return at::_ops::_upsample_nearest_exact2d::call(self, output_size, scales_h, scales_w);
21999   }
22000   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
22001   auto results = batch_rule(self_value, self_bdim, output_size, scales_h, scales_w);
22002   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
22003 }
22004 template <typename batch_rule_t, batch_rule_t batch_rule>
22005 at::Tensor upsample_nearest2d_backward_generated_plumbing(const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, ::std::optional<double> scales_h, ::std::optional<double> scales_w) {
22006   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
22007   auto maybe_layer = maybeCurrentDynamicLayer();
22008   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
22009   int64_t cur_level = maybe_layer->layerId();
22010   if (!isBatchedAtLevel(grad_output, cur_level)) {
22011     return at::_ops::upsample_nearest2d_backward::call(grad_output, output_size, input_size, scales_h, scales_w);
22012   }
22013   auto [grad_output_value, grad_output_bdim] = unwrapTensorAtLevel(grad_output, cur_level);
22014   auto results = batch_rule(grad_output_value, grad_output_bdim, output_size, input_size, scales_h, scales_w);
22015   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
22016 }
22017 template <typename batch_rule_t, batch_rule_t batch_rule>
22018 at::Tensor _upsample_nearest_exact2d_backward_generated_plumbing(const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, ::std::optional<double> scales_h, ::std::optional<double> scales_w) {
22019   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
22020   auto maybe_layer = maybeCurrentDynamicLayer();
22021   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
22022   int64_t cur_level = maybe_layer->layerId();
22023   if (!isBatchedAtLevel(grad_output, cur_level)) {
22024     return at::_ops::_upsample_nearest_exact2d_backward::call(grad_output, output_size, input_size, scales_h, scales_w);
22025   }
22026   auto [grad_output_value, grad_output_bdim] = unwrapTensorAtLevel(grad_output, cur_level);
22027   auto results = batch_rule(grad_output_value, grad_output_bdim, output_size, input_size, scales_h, scales_w);
22028   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
22029 }
22030 template <typename batch_rule_t, batch_rule_t batch_rule>
22031 at::Tensor upsample_nearest3d_generated_plumbing(const at::Tensor & self, c10::SymIntArrayRef output_size, ::std::optional<double> scales_d, ::std::optional<double> scales_h, ::std::optional<double> scales_w) {
22032   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
22033   auto maybe_layer = maybeCurrentDynamicLayer();
22034   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
22035   int64_t cur_level = maybe_layer->layerId();
22036   if (!isBatchedAtLevel(self, cur_level)) {
22037     return at::_ops::upsample_nearest3d::call(self, output_size, scales_d, scales_h, scales_w);
22038   }
22039   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
22040   auto results = batch_rule(self_value, self_bdim, output_size, scales_d, scales_h, scales_w);
22041   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
22042 }
22043 template <typename batch_rule_t, batch_rule_t batch_rule>
22044 at::Tensor _upsample_nearest_exact3d_generated_plumbing(const at::Tensor & self, c10::SymIntArrayRef output_size, ::std::optional<double> scales_d, ::std::optional<double> scales_h, ::std::optional<double> scales_w) {
22045   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
22046   auto maybe_layer = maybeCurrentDynamicLayer();
22047   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
22048   int64_t cur_level = maybe_layer->layerId();
22049   if (!isBatchedAtLevel(self, cur_level)) {
22050     return at::_ops::_upsample_nearest_exact3d::call(self, output_size, scales_d, scales_h, scales_w);
22051   }
22052   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
22053   auto results = batch_rule(self_value, self_bdim, output_size, scales_d, scales_h, scales_w);
22054   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
22055 }
22056 template <typename batch_rule_t, batch_rule_t batch_rule>
22057 at::Tensor upsample_nearest3d_backward_generated_plumbing(const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, ::std::optional<double> scales_d, ::std::optional<double> scales_h, ::std::optional<double> scales_w) {
22058   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
22059   auto maybe_layer = maybeCurrentDynamicLayer();
22060   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
22061   int64_t cur_level = maybe_layer->layerId();
22062   if (!isBatchedAtLevel(grad_output, cur_level)) {
22063     return at::_ops::upsample_nearest3d_backward::call(grad_output, output_size, input_size, scales_d, scales_h, scales_w);
22064   }
22065   auto [grad_output_value, grad_output_bdim] = unwrapTensorAtLevel(grad_output, cur_level);
22066   auto results = batch_rule(grad_output_value, grad_output_bdim, output_size, input_size, scales_d, scales_h, scales_w);
22067   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
22068 }
22069 template <typename batch_rule_t, batch_rule_t batch_rule>
22070 at::Tensor _upsample_nearest_exact3d_backward_generated_plumbing(const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, ::std::optional<double> scales_d, ::std::optional<double> scales_h, ::std::optional<double> scales_w) {
22071   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
22072   auto maybe_layer = maybeCurrentDynamicLayer();
22073   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
22074   int64_t cur_level = maybe_layer->layerId();
22075   if (!isBatchedAtLevel(grad_output, cur_level)) {
22076     return at::_ops::_upsample_nearest_exact3d_backward::call(grad_output, output_size, input_size, scales_d, scales_h, scales_w);
22077   }
22078   auto [grad_output_value, grad_output_bdim] = unwrapTensorAtLevel(grad_output, cur_level);
22079   auto results = batch_rule(grad_output_value, grad_output_bdim, output_size, input_size, scales_d, scales_h, scales_w);
22080   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
22081 }
22082 template <typename batch_rule_t, batch_rule_t batch_rule>
22083 at::Tensor sigmoid_backward_generated_plumbing(const at::Tensor & grad_output, const at::Tensor & output) {
22084   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
22085   auto maybe_layer = maybeCurrentDynamicLayer();
22086   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
22087   int64_t cur_level = maybe_layer->layerId();
22088   if (!isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(output, cur_level)) {
22089     return at::_ops::sigmoid_backward::call(grad_output, output);
22090   }
22091   auto [grad_output_value, grad_output_bdim] = unwrapTensorAtLevel(grad_output, cur_level);
22092   auto [output_value, output_bdim] = unwrapTensorAtLevel(output, cur_level);
22093   auto results = batch_rule(grad_output_value, grad_output_bdim, output_value, output_bdim);
22094   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
22095 }
22096 template <typename batch_rule_t, batch_rule_t batch_rule>
22097 at::Tensor logit_backward_generated_plumbing(const at::Tensor & grad_output, const at::Tensor & self, ::std::optional<double> eps) {
22098   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
22099   auto maybe_layer = maybeCurrentDynamicLayer();
22100   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
22101   int64_t cur_level = maybe_layer->layerId();
22102   if (!isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(self, cur_level)) {
22103     return at::_ops::logit_backward::call(grad_output, self, eps);
22104   }
22105   auto [grad_output_value, grad_output_bdim] = unwrapTensorAtLevel(grad_output, cur_level);
22106   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
22107   auto results = batch_rule(grad_output_value, grad_output_bdim, self_value, self_bdim, eps);
22108   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
22109 }
22110 template <typename batch_rule_t, batch_rule_t batch_rule>
22111 at::Tensor tanh_backward_generated_plumbing(const at::Tensor & grad_output, const at::Tensor & output) {
22112   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
22113   auto maybe_layer = maybeCurrentDynamicLayer();
22114   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
22115   int64_t cur_level = maybe_layer->layerId();
22116   if (!isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(output, cur_level)) {
22117     return at::_ops::tanh_backward::call(grad_output, output);
22118   }
22119   auto [grad_output_value, grad_output_bdim] = unwrapTensorAtLevel(grad_output, cur_level);
22120   auto [output_value, output_bdim] = unwrapTensorAtLevel(output, cur_level);
22121   auto results = batch_rule(grad_output_value, grad_output_bdim, output_value, output_bdim);
22122   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
22123 }
22124 template <typename batch_rule_t, batch_rule_t batch_rule>
22125 at::Tensor slow_conv_transpose2d_generated_plumbing(const at::Tensor & self, const at::Tensor & weight, c10::SymIntArrayRef kernel_size, const ::std::optional<at::Tensor> & bias, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, c10::SymIntArrayRef output_padding, c10::SymIntArrayRef dilation) {
22126   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
22127   auto maybe_layer = maybeCurrentDynamicLayer();
22128   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
22129   int64_t cur_level = maybe_layer->layerId();
22130   if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(bias, cur_level)) {
22131     return at::_ops::slow_conv_transpose2d::call(self, weight, kernel_size, bias, stride, padding, output_padding, dilation);
22132   }
22133   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
22134   auto [weight_value, weight_bdim] = unwrapTensorAtLevel(weight, cur_level);
22135   std::optional<Tensor> bias_value;
22136   std::optional<int64_t> bias_bdim;
22137   if (bias) {
22138       std::tie(bias_value, bias_bdim) = unwrapTensorAtLevel(bias.value(), cur_level);
22139   }
22140   auto results = batch_rule(self_value, self_bdim, weight_value, weight_bdim, kernel_size, bias_value, bias_bdim, stride, padding, output_padding, dilation);
22141   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
22142 }
22143 template <typename batch_rule_t, batch_rule_t batch_rule>
22144 at::Tensor slow_conv_transpose3d_generated_plumbing(const at::Tensor & self, const at::Tensor & weight, c10::SymIntArrayRef kernel_size, const ::std::optional<at::Tensor> & bias, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, c10::SymIntArrayRef output_padding, c10::SymIntArrayRef dilation) {
22145   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
22146   auto maybe_layer = maybeCurrentDynamicLayer();
22147   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
22148   int64_t cur_level = maybe_layer->layerId();
22149   if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(bias, cur_level)) {
22150     return at::_ops::slow_conv_transpose3d::call(self, weight, kernel_size, bias, stride, padding, output_padding, dilation);
22151   }
22152   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
22153   auto [weight_value, weight_bdim] = unwrapTensorAtLevel(weight, cur_level);
22154   std::optional<Tensor> bias_value;
22155   std::optional<int64_t> bias_bdim;
22156   if (bias) {
22157       std::tie(bias_value, bias_bdim) = unwrapTensorAtLevel(bias.value(), cur_level);
22158   }
22159   auto results = batch_rule(self_value, self_bdim, weight_value, weight_bdim, kernel_size, bias_value, bias_bdim, stride, padding, output_padding, dilation);
22160   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
22161 }
22162 template <typename batch_rule_t, batch_rule_t batch_rule>
22163 at::Tensor thnn_conv2d_generated_plumbing(const at::Tensor & self, const at::Tensor & weight, c10::SymIntArrayRef kernel_size, const ::std::optional<at::Tensor> & bias, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding) {
22164   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
22165   auto maybe_layer = maybeCurrentDynamicLayer();
22166   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
22167   int64_t cur_level = maybe_layer->layerId();
22168   if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(bias, cur_level)) {
22169     return at::_ops::thnn_conv2d::call(self, weight, kernel_size, bias, stride, padding);
22170   }
22171   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
22172   auto [weight_value, weight_bdim] = unwrapTensorAtLevel(weight, cur_level);
22173   std::optional<Tensor> bias_value;
22174   std::optional<int64_t> bias_bdim;
22175   if (bias) {
22176       std::tie(bias_value, bias_bdim) = unwrapTensorAtLevel(bias.value(), cur_level);
22177   }
22178   auto results = batch_rule(self_value, self_bdim, weight_value, weight_bdim, kernel_size, bias_value, bias_bdim, stride, padding);
22179   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
22180 }
22181 template <typename batch_rule_t, batch_rule_t batch_rule>
22182 at::Tensor _slow_conv2d_forward_generated_plumbing(const at::Tensor & self, const at::Tensor & weight, c10::SymIntArrayRef kernel_size, const ::std::optional<at::Tensor> & bias, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding) {
22183   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
22184   auto maybe_layer = maybeCurrentDynamicLayer();
22185   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
22186   int64_t cur_level = maybe_layer->layerId();
22187   if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(bias, cur_level)) {
22188     return at::_ops::_slow_conv2d_forward::call(self, weight, kernel_size, bias, stride, padding);
22189   }
22190   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
22191   auto [weight_value, weight_bdim] = unwrapTensorAtLevel(weight, cur_level);
22192   std::optional<Tensor> bias_value;
22193   std::optional<int64_t> bias_bdim;
22194   if (bias) {
22195       std::tie(bias_value, bias_bdim) = unwrapTensorAtLevel(bias.value(), cur_level);
22196   }
22197   auto results = batch_rule(self_value, self_bdim, weight_value, weight_bdim, kernel_size, bias_value, bias_bdim, stride, padding);
22198   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
22199 }
22200 template <typename batch_rule_t, batch_rule_t batch_rule>
22201 ::std::tuple<at::Tensor,at::Tensor,at::Tensor> _slow_conv2d_backward_output_mask_generated_plumbing(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & weight, c10::SymIntArrayRef kernel_size, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, ::std::array<bool,3> output_mask) {
22202   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
22203   auto maybe_layer = maybeCurrentDynamicLayer();
22204   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
22205   int64_t cur_level = maybe_layer->layerId();
22206   if (!isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(weight, cur_level)) {
22207     return at::_ops::_slow_conv2d_backward_output_mask::call(grad_output, self, weight, kernel_size, stride, padding, output_mask);
22208   }
22209   auto [grad_output_value, grad_output_bdim] = unwrapTensorAtLevel(grad_output, cur_level);
22210   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
22211   auto [weight_value, weight_bdim] = unwrapTensorAtLevel(weight, cur_level);
22212   auto results = batch_rule(grad_output_value, grad_output_bdim, self_value, self_bdim, weight_value, weight_bdim, kernel_size, stride, padding, output_mask);
22213   return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level));
22214 }
22215 template <typename batch_rule_t, batch_rule_t batch_rule>
22216 at::Tensor _conv_depthwise2d_generated_plumbing(const at::Tensor & self, const at::Tensor & weight, c10::SymIntArrayRef kernel_size, const ::std::optional<at::Tensor> & bias, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, c10::SymIntArrayRef dilation) {
22217   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
22218   auto maybe_layer = maybeCurrentDynamicLayer();
22219   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
22220   int64_t cur_level = maybe_layer->layerId();
22221   if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(bias, cur_level)) {
22222     return at::_ops::_conv_depthwise2d::call(self, weight, kernel_size, bias, stride, padding, dilation);
22223   }
22224   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
22225   auto [weight_value, weight_bdim] = unwrapTensorAtLevel(weight, cur_level);
22226   std::optional<Tensor> bias_value;
22227   std::optional<int64_t> bias_bdim;
22228   if (bias) {
22229       std::tie(bias_value, bias_bdim) = unwrapTensorAtLevel(bias.value(), cur_level);
22230   }
22231   auto results = batch_rule(self_value, self_bdim, weight_value, weight_bdim, kernel_size, bias_value, bias_bdim, stride, padding, dilation);
22232   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
22233 }
22234 template <typename batch_rule_t, batch_rule_t batch_rule>
22235 at::Tensor conv_depthwise3d_generated_plumbing(const at::Tensor & self, const at::Tensor & weight, c10::SymIntArrayRef kernel_size, const ::std::optional<at::Tensor> & bias, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, c10::SymIntArrayRef dilation) {
22236   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
22237   auto maybe_layer = maybeCurrentDynamicLayer();
22238   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
22239   int64_t cur_level = maybe_layer->layerId();
22240   if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(bias, cur_level)) {
22241     return at::_ops::conv_depthwise3d::call(self, weight, kernel_size, bias, stride, padding, dilation);
22242   }
22243   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
22244   auto [weight_value, weight_bdim] = unwrapTensorAtLevel(weight, cur_level);
22245   std::optional<Tensor> bias_value;
22246   std::optional<int64_t> bias_bdim;
22247   if (bias) {
22248       std::tie(bias_value, bias_bdim) = unwrapTensorAtLevel(bias.value(), cur_level);
22249   }
22250   auto results = batch_rule(self_value, self_bdim, weight_value, weight_bdim, kernel_size, bias_value, bias_bdim, stride, padding, dilation);
22251   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
22252 }
22253 template <typename batch_rule_t, batch_rule_t batch_rule>
22254 at::Tensor slow_conv3d_generated_plumbing(const at::Tensor & self, const at::Tensor & weight, c10::SymIntArrayRef kernel_size, const ::std::optional<at::Tensor> & bias, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding) {
22255   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
22256   auto maybe_layer = maybeCurrentDynamicLayer();
22257   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
22258   int64_t cur_level = maybe_layer->layerId();
22259   if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(bias, cur_level)) {
22260     return at::_ops::slow_conv3d::call(self, weight, kernel_size, bias, stride, padding);
22261   }
22262   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
22263   auto [weight_value, weight_bdim] = unwrapTensorAtLevel(weight, cur_level);
22264   std::optional<Tensor> bias_value;
22265   std::optional<int64_t> bias_bdim;
22266   if (bias) {
22267       std::tie(bias_value, bias_bdim) = unwrapTensorAtLevel(bias.value(), cur_level);
22268   }
22269   auto results = batch_rule(self_value, self_bdim, weight_value, weight_bdim, kernel_size, bias_value, bias_bdim, stride, padding);
22270   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
22271 }
22272 template <typename batch_rule_t, batch_rule_t batch_rule>
22273 at::Tensor slow_conv3d_forward_generated_plumbing(const at::Tensor & self, const at::Tensor & weight, c10::SymIntArrayRef kernel_size, const ::std::optional<at::Tensor> & bias, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding) {
22274   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
22275   auto maybe_layer = maybeCurrentDynamicLayer();
22276   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
22277   int64_t cur_level = maybe_layer->layerId();
22278   if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(bias, cur_level)) {
22279     return at::_ops::slow_conv3d_forward::call(self, weight, kernel_size, bias, stride, padding);
22280   }
22281   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
22282   auto [weight_value, weight_bdim] = unwrapTensorAtLevel(weight, cur_level);
22283   std::optional<Tensor> bias_value;
22284   std::optional<int64_t> bias_bdim;
22285   if (bias) {
22286       std::tie(bias_value, bias_bdim) = unwrapTensorAtLevel(bias.value(), cur_level);
22287   }
22288   auto results = batch_rule(self_value, self_bdim, weight_value, weight_bdim, kernel_size, bias_value, bias_bdim, stride, padding);
22289   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
22290 }
22291 template <typename batch_rule_t, batch_rule_t batch_rule>
22292 at::Tensor slow_conv_dilated2d_generated_plumbing(const at::Tensor & self, const at::Tensor & weight, c10::SymIntArrayRef kernel_size, const ::std::optional<at::Tensor> & bias, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, c10::SymIntArrayRef dilation) {
22293   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
22294   auto maybe_layer = maybeCurrentDynamicLayer();
22295   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
22296   int64_t cur_level = maybe_layer->layerId();
22297   if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(bias, cur_level)) {
22298     return at::_ops::slow_conv_dilated2d::call(self, weight, kernel_size, bias, stride, padding, dilation);
22299   }
22300   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
22301   auto [weight_value, weight_bdim] = unwrapTensorAtLevel(weight, cur_level);
22302   std::optional<Tensor> bias_value;
22303   std::optional<int64_t> bias_bdim;
22304   if (bias) {
22305       std::tie(bias_value, bias_bdim) = unwrapTensorAtLevel(bias.value(), cur_level);
22306   }
22307   auto results = batch_rule(self_value, self_bdim, weight_value, weight_bdim, kernel_size, bias_value, bias_bdim, stride, padding, dilation);
22308   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
22309 }
22310 template <typename batch_rule_t, batch_rule_t batch_rule>
22311 at::Tensor slow_conv_dilated3d_generated_plumbing(const at::Tensor & self, const at::Tensor & weight, c10::SymIntArrayRef kernel_size, const ::std::optional<at::Tensor> & bias, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, c10::SymIntArrayRef dilation) {
22312   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
22313   auto maybe_layer = maybeCurrentDynamicLayer();
22314   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
22315   int64_t cur_level = maybe_layer->layerId();
22316   if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(bias, cur_level)) {
22317     return at::_ops::slow_conv_dilated3d::call(self, weight, kernel_size, bias, stride, padding, dilation);
22318   }
22319   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
22320   auto [weight_value, weight_bdim] = unwrapTensorAtLevel(weight, cur_level);
22321   std::optional<Tensor> bias_value;
22322   std::optional<int64_t> bias_bdim;
22323   if (bias) {
22324       std::tie(bias_value, bias_bdim) = unwrapTensorAtLevel(bias.value(), cur_level);
22325   }
22326   auto results = batch_rule(self_value, self_bdim, weight_value, weight_bdim, kernel_size, bias_value, bias_bdim, stride, padding, dilation);
22327   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
22328 }
22329 template <typename batch_rule_t, batch_rule_t batch_rule>
22330 at::Tensor col2im_generated_plumbing(const at::Tensor & self, c10::SymIntArrayRef output_size, at::IntArrayRef kernel_size, at::IntArrayRef dilation, at::IntArrayRef padding, at::IntArrayRef stride) {
22331   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
22332   auto maybe_layer = maybeCurrentDynamicLayer();
22333   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
22334   int64_t cur_level = maybe_layer->layerId();
22335   if (!isBatchedAtLevel(self, cur_level)) {
22336     return at::_ops::col2im::call(self, output_size, kernel_size, dilation, padding, stride);
22337   }
22338   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
22339   auto results = batch_rule(self_value, self_bdim, output_size, kernel_size, dilation, padding, stride);
22340   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
22341 }
22342 template <typename batch_rule_t, batch_rule_t batch_rule>
22343 at::Tensor column_stack_generated_plumbing(at::TensorList tensors) {
22344   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
22345   auto maybe_layer = maybeCurrentDynamicLayer();
22346   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
22347   int64_t cur_level = maybe_layer->layerId();
22348   if (!isBatchedAtLevel(tensors, cur_level)) {
22349     return at::_ops::column_stack::call(tensors);
22350   }
22351 
22352   auto results = batch_rule(tensors);
22353   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
22354 }
22355 template <typename batch_rule_t, batch_rule_t batch_rule>
22356 at::Tensor im2col_generated_plumbing(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef dilation, at::IntArrayRef padding, at::IntArrayRef stride) {
22357   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
22358   auto maybe_layer = maybeCurrentDynamicLayer();
22359   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
22360   int64_t cur_level = maybe_layer->layerId();
22361   if (!isBatchedAtLevel(self, cur_level)) {
22362     return at::_ops::im2col::call(self, kernel_size, dilation, padding, stride);
22363   }
22364   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
22365   auto results = batch_rule(self_value, self_bdim, kernel_size, dilation, padding, stride);
22366   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
22367 }
22368 template <typename batch_rule_t, batch_rule_t batch_rule>
22369 at::Tensor isfinite_generated_plumbing(const at::Tensor & self) {
22370   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
22371   auto maybe_layer = maybeCurrentDynamicLayer();
22372   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
22373   int64_t cur_level = maybe_layer->layerId();
22374   if (!isBatchedAtLevel(self, cur_level)) {
22375     return at::_ops::isfinite::call(self);
22376   }
22377   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
22378   auto results = batch_rule(self_value, self_bdim);
22379   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
22380 }
22381 template <typename batch_rule_t, batch_rule_t batch_rule>
22382 at::Tensor isinf_generated_plumbing(const at::Tensor & self) {
22383   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
22384   auto maybe_layer = maybeCurrentDynamicLayer();
22385   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
22386   int64_t cur_level = maybe_layer->layerId();
22387   if (!isBatchedAtLevel(self, cur_level)) {
22388     return at::_ops::isinf::call(self);
22389   }
22390   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
22391   auto results = batch_rule(self_value, self_bdim);
22392   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
22393 }
22394 template <typename batch_rule_t, batch_rule_t batch_rule>
22395 void record_stream_generated_plumbing(at::Tensor & self, at::Stream s) {
22396   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
22397   auto maybe_layer = maybeCurrentDynamicLayer();
22398   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
22399   int64_t cur_level = maybe_layer->layerId();
22400   if (!isBatchedAtLevel(self, cur_level)) {
22401     return at::_ops::record_stream::call(self, s);
22402   }
22403   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
22404   batch_rule(self_value, self_bdim, s);
22405 }
// Standard unary plumbing: aten::isposinf and aten::isneginf.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor isposinf_generated_plumbing(const at::Tensor & self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::isposinf::call(self);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor isneginf_generated_plumbing(const at::Tensor & self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::isneginf::call(self);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// Plumbing for functorch's own batching helpers: aten::_add_batch_dim and
// aten::_remove_batch_dim. Non-tensor args (batch_dim/level/batch_size/
// out_dim) are forwarded to the batch rule unchanged.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor _add_batch_dim_generated_plumbing(const at::Tensor & self, int64_t batch_dim, int64_t level) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::_add_batch_dim::call(self, batch_dim, level);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, batch_dim, level);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor _remove_batch_dim_generated_plumbing(const at::Tensor & self, int64_t level, c10::SymInt batch_size, int64_t out_dim) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::_remove_batch_dim::call(self, level, batch_size, out_dim);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, level, batch_size, out_dim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// Standard unary plumbing for the torch.special elementwise family:
// entr, ndtri, log_ndtr, expm1, exp2, psi, digamma, gammaln, erf, erfc,
// erfcx, erfinv, ndtr. All thirteen instantiate the identical template:
// unbatched fast path, otherwise unwrap -> batch_rule -> makeBatched.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor special_entr_generated_plumbing(const at::Tensor & self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::special_entr::call(self);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor special_ndtri_generated_plumbing(const at::Tensor & self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::special_ndtri::call(self);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor special_log_ndtr_generated_plumbing(const at::Tensor & self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::special_log_ndtr::call(self);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor special_expm1_generated_plumbing(const at::Tensor & self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::special_expm1::call(self);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor special_exp2_generated_plumbing(const at::Tensor & self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::special_exp2::call(self);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor special_psi_generated_plumbing(const at::Tensor & self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::special_psi::call(self);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor special_digamma_generated_plumbing(const at::Tensor & self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::special_digamma::call(self);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor special_gammaln_generated_plumbing(const at::Tensor & self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::special_gammaln::call(self);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor special_erf_generated_plumbing(const at::Tensor & self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::special_erf::call(self);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor special_erfc_generated_plumbing(const at::Tensor & self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::special_erfc::call(self);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor special_erfcx_generated_plumbing(const at::Tensor & self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::special_erfcx::call(self);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor special_erfinv_generated_plumbing(const at::Tensor & self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::special_erfinv::call(self);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor special_ndtr_generated_plumbing(const at::Tensor & self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::special_ndtr::call(self);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// Binary plumbing for special_xlog1py / special_xlogy and their Scalar
// overloads. Tensor-Tensor: fast path only when NEITHER arg is batched at
// this level; a Scalar arg is never batched and is forwarded as-is.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor special_xlog1py_generated_plumbing(const at::Tensor & self, const at::Tensor & other) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
    return at::_ops::special_xlog1py::call(self, other);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto [other_value, other_bdim] = unwrapTensorAtLevel(other, cur_level);
  auto results = batch_rule(self_value, self_bdim, other_value, other_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor special_xlog1py_self_scalar_generated_plumbing(const at::Scalar & self, const at::Tensor & other) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(other, cur_level)) {
    return at::_ops::special_xlog1py_self_scalar::call(self, other);
  }
  auto [other_value, other_bdim] = unwrapTensorAtLevel(other, cur_level);
  auto results = batch_rule(self, other_value, other_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor special_xlog1py_other_scalar_generated_plumbing(const at::Tensor & self, const at::Scalar & other) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::special_xlog1py_other_scalar::call(self, other);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, other);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor special_xlogy_generated_plumbing(const at::Tensor & self, const at::Tensor & other) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
    return at::_ops::special_xlogy::call(self, other);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto [other_value, other_bdim] = unwrapTensorAtLevel(other, cur_level);
  auto results = batch_rule(self_value, self_bdim, other_value, other_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor special_xlogy_self_scalar_generated_plumbing(const at::Scalar & self, const at::Tensor & other) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(other, cur_level)) {
    return at::_ops::special_xlogy_self_scalar::call(self, other);
  }
  auto [other_value, other_bdim] = unwrapTensorAtLevel(other, cur_level);
  auto results = batch_rule(self, other_value, other_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor special_xlogy_other_scalar_generated_plumbing(const at::Tensor & self, const at::Scalar & other) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::special_xlogy_other_scalar::call(self, other);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, other);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// Binary plumbing for special_zeta and its two Scalar overloads, following
// the same Tensor-Tensor / Scalar-Tensor / Tensor-Scalar pattern as above.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor special_zeta_generated_plumbing(const at::Tensor & self, const at::Tensor & other) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
    return at::_ops::special_zeta::call(self, other);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto [other_value, other_bdim] = unwrapTensorAtLevel(other, cur_level);
  auto results = batch_rule(self_value, self_bdim, other_value, other_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor special_zeta_self_scalar_generated_plumbing(const at::Scalar & self, const at::Tensor & other) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(other, cur_level)) {
    return at::_ops::special_zeta_self_scalar::call(self, other);
  }
  auto [other_value, other_bdim] = unwrapTensorAtLevel(other, cur_level);
  auto results = batch_rule(self, other_value, other_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor special_zeta_other_scalar_generated_plumbing(const at::Tensor & self, const at::Scalar & other) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::special_zeta_other_scalar::call(self, other);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, other);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// Standard unary plumbing for the modified-Bessel family:
// special_i0, special_i0e, special_i1, special_i1e.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor special_i0_generated_plumbing(const at::Tensor & self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::special_i0::call(self);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor special_i0e_generated_plumbing(const at::Tensor & self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::special_i0e::call(self);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor special_i1_generated_plumbing(const at::Tensor & self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::special_i1::call(self);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor special_i1e_generated_plumbing(const at::Tensor & self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::special_i1e::call(self);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// Plumbing for special_logit (optional eps), special_polygamma (leading
// non-tensor arg `n` keeps its position before the unwrapped tensor), and
// special_logsumexp (dim/keepdim forwarded unchanged).
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor special_logit_generated_plumbing(const at::Tensor & self, ::std::optional<double> eps) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::special_logit::call(self, eps);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, eps);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor special_polygamma_generated_plumbing(int64_t n, const at::Tensor & self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::special_polygamma::call(n, self);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(n, self_value, self_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor special_logsumexp_generated_plumbing(const at::Tensor & self, at::IntArrayRef dim, bool keepdim) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::special_logsumexp::call(self, dim, keepdim);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, dim, keepdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// Standard unary plumbing: special_expit, special_sinc, special_round
// (extra `decimals` arg forwarded), special_log1p.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor special_expit_generated_plumbing(const at::Tensor & self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::special_expit::call(self);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor special_sinc_generated_plumbing(const at::Tensor & self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::special_sinc::call(self);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor special_round_generated_plumbing(const at::Tensor & self, int64_t decimals) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::special_round::call(self, decimals);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, decimals);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor special_log1p_generated_plumbing(const at::Tensor & self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::special_log1p::call(self);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// Plumbing for special_log_softmax / special_softmax (dim + optional dtype),
// the two-tensor special_gammainc / special_gammaincc, and
// special_multigammaln (integer `p` forwarded).
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor special_log_softmax_generated_plumbing(const at::Tensor & self, int64_t dim, ::std::optional<at::ScalarType> dtype) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::special_log_softmax::call(self, dim, dtype);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, dim, dtype);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor special_gammainc_generated_plumbing(const at::Tensor & self, const at::Tensor & other) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
    return at::_ops::special_gammainc::call(self, other);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto [other_value, other_bdim] = unwrapTensorAtLevel(other, cur_level);
  auto results = batch_rule(self_value, self_bdim, other_value, other_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor special_gammaincc_generated_plumbing(const at::Tensor & self, const at::Tensor & other) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
    return at::_ops::special_gammaincc::call(self, other);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto [other_value, other_bdim] = unwrapTensorAtLevel(other, cur_level);
  auto results = batch_rule(self_value, self_bdim, other_value, other_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor special_multigammaln_generated_plumbing(const at::Tensor & self, int64_t p) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::special_multigammaln::call(self, p);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, p);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor special_softmax_generated_plumbing(const at::Tensor & self, int64_t dim, ::std::optional<at::ScalarType> dtype) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::special_softmax::call(self, dim, dtype);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, dim, dtype);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
22957 template <typename batch_rule_t, batch_rule_t batch_rule>
22958 at::Tensor fft_fft_generated_plumbing(const at::Tensor & self, ::std::optional<c10::SymInt> n, int64_t dim, ::std::optional<c10::string_view> norm) {
22959   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
22960   auto maybe_layer = maybeCurrentDynamicLayer();
22961   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
22962   int64_t cur_level = maybe_layer->layerId();
22963   if (!isBatchedAtLevel(self, cur_level)) {
22964     return at::_ops::fft_fft::call(self, n, dim, norm);
22965   }
22966   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
22967   auto results = batch_rule(self_value, self_bdim, n, dim, norm);
22968   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
22969 }
22970 template <typename batch_rule_t, batch_rule_t batch_rule>
22971 at::Tensor fft_ifft_generated_plumbing(const at::Tensor & self, ::std::optional<c10::SymInt> n, int64_t dim, ::std::optional<c10::string_view> norm) {
22972   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
22973   auto maybe_layer = maybeCurrentDynamicLayer();
22974   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
22975   int64_t cur_level = maybe_layer->layerId();
22976   if (!isBatchedAtLevel(self, cur_level)) {
22977     return at::_ops::fft_ifft::call(self, n, dim, norm);
22978   }
22979   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
22980   auto results = batch_rule(self_value, self_bdim, n, dim, norm);
22981   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
22982 }
22983 template <typename batch_rule_t, batch_rule_t batch_rule>
22984 at::Tensor fft_rfft_generated_plumbing(const at::Tensor & self, ::std::optional<c10::SymInt> n, int64_t dim, ::std::optional<c10::string_view> norm) {
22985   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
22986   auto maybe_layer = maybeCurrentDynamicLayer();
22987   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
22988   int64_t cur_level = maybe_layer->layerId();
22989   if (!isBatchedAtLevel(self, cur_level)) {
22990     return at::_ops::fft_rfft::call(self, n, dim, norm);
22991   }
22992   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
22993   auto results = batch_rule(self_value, self_bdim, n, dim, norm);
22994   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
22995 }
22996 template <typename batch_rule_t, batch_rule_t batch_rule>
22997 at::Tensor fft_irfft_generated_plumbing(const at::Tensor & self, ::std::optional<c10::SymInt> n, int64_t dim, ::std::optional<c10::string_view> norm) {
22998   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
22999   auto maybe_layer = maybeCurrentDynamicLayer();
23000   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
23001   int64_t cur_level = maybe_layer->layerId();
23002   if (!isBatchedAtLevel(self, cur_level)) {
23003     return at::_ops::fft_irfft::call(self, n, dim, norm);
23004   }
23005   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
23006   auto results = batch_rule(self_value, self_bdim, n, dim, norm);
23007   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
23008 }
23009 template <typename batch_rule_t, batch_rule_t batch_rule>
23010 at::Tensor fft_hfft_generated_plumbing(const at::Tensor & self, ::std::optional<c10::SymInt> n, int64_t dim, ::std::optional<c10::string_view> norm) {
23011   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
23012   auto maybe_layer = maybeCurrentDynamicLayer();
23013   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
23014   int64_t cur_level = maybe_layer->layerId();
23015   if (!isBatchedAtLevel(self, cur_level)) {
23016     return at::_ops::fft_hfft::call(self, n, dim, norm);
23017   }
23018   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
23019   auto results = batch_rule(self_value, self_bdim, n, dim, norm);
23020   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
23021 }
23022 template <typename batch_rule_t, batch_rule_t batch_rule>
23023 at::Tensor fft_ihfft_generated_plumbing(const at::Tensor & self, ::std::optional<c10::SymInt> n, int64_t dim, ::std::optional<c10::string_view> norm) {
23024   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
23025   auto maybe_layer = maybeCurrentDynamicLayer();
23026   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
23027   int64_t cur_level = maybe_layer->layerId();
23028   if (!isBatchedAtLevel(self, cur_level)) {
23029     return at::_ops::fft_ihfft::call(self, n, dim, norm);
23030   }
23031   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
23032   auto results = batch_rule(self_value, self_bdim, n, dim, norm);
23033   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
23034 }
23035 template <typename batch_rule_t, batch_rule_t batch_rule>
23036 at::Tensor fft_fft2_generated_plumbing(const at::Tensor & self, at::OptionalSymIntArrayRef s, at::IntArrayRef dim, ::std::optional<c10::string_view> norm) {
23037   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
23038   auto maybe_layer = maybeCurrentDynamicLayer();
23039   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
23040   int64_t cur_level = maybe_layer->layerId();
23041   if (!isBatchedAtLevel(self, cur_level)) {
23042     return at::_ops::fft_fft2::call(self, s, dim, norm);
23043   }
23044   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
23045   auto results = batch_rule(self_value, self_bdim, s, dim, norm);
23046   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
23047 }
23048 template <typename batch_rule_t, batch_rule_t batch_rule>
23049 at::Tensor fft_ifft2_generated_plumbing(const at::Tensor & self, at::OptionalSymIntArrayRef s, at::IntArrayRef dim, ::std::optional<c10::string_view> norm) {
23050   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
23051   auto maybe_layer = maybeCurrentDynamicLayer();
23052   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
23053   int64_t cur_level = maybe_layer->layerId();
23054   if (!isBatchedAtLevel(self, cur_level)) {
23055     return at::_ops::fft_ifft2::call(self, s, dim, norm);
23056   }
23057   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
23058   auto results = batch_rule(self_value, self_bdim, s, dim, norm);
23059   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
23060 }
23061 template <typename batch_rule_t, batch_rule_t batch_rule>
23062 at::Tensor fft_rfft2_generated_plumbing(const at::Tensor & self, at::OptionalSymIntArrayRef s, at::IntArrayRef dim, ::std::optional<c10::string_view> norm) {
23063   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
23064   auto maybe_layer = maybeCurrentDynamicLayer();
23065   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
23066   int64_t cur_level = maybe_layer->layerId();
23067   if (!isBatchedAtLevel(self, cur_level)) {
23068     return at::_ops::fft_rfft2::call(self, s, dim, norm);
23069   }
23070   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
23071   auto results = batch_rule(self_value, self_bdim, s, dim, norm);
23072   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
23073 }
23074 template <typename batch_rule_t, batch_rule_t batch_rule>
23075 at::Tensor fft_irfft2_generated_plumbing(const at::Tensor & self, at::OptionalSymIntArrayRef s, at::IntArrayRef dim, ::std::optional<c10::string_view> norm) {
23076   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
23077   auto maybe_layer = maybeCurrentDynamicLayer();
23078   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
23079   int64_t cur_level = maybe_layer->layerId();
23080   if (!isBatchedAtLevel(self, cur_level)) {
23081     return at::_ops::fft_irfft2::call(self, s, dim, norm);
23082   }
23083   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
23084   auto results = batch_rule(self_value, self_bdim, s, dim, norm);
23085   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
23086 }
23087 template <typename batch_rule_t, batch_rule_t batch_rule>
23088 at::Tensor fft_hfft2_generated_plumbing(const at::Tensor & self, at::OptionalSymIntArrayRef s, at::IntArrayRef dim, ::std::optional<c10::string_view> norm) {
23089   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
23090   auto maybe_layer = maybeCurrentDynamicLayer();
23091   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
23092   int64_t cur_level = maybe_layer->layerId();
23093   if (!isBatchedAtLevel(self, cur_level)) {
23094     return at::_ops::fft_hfft2::call(self, s, dim, norm);
23095   }
23096   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
23097   auto results = batch_rule(self_value, self_bdim, s, dim, norm);
23098   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
23099 }
23100 template <typename batch_rule_t, batch_rule_t batch_rule>
23101 at::Tensor fft_ihfft2_generated_plumbing(const at::Tensor & self, at::OptionalSymIntArrayRef s, at::IntArrayRef dim, ::std::optional<c10::string_view> norm) {
23102   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
23103   auto maybe_layer = maybeCurrentDynamicLayer();
23104   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
23105   int64_t cur_level = maybe_layer->layerId();
23106   if (!isBatchedAtLevel(self, cur_level)) {
23107     return at::_ops::fft_ihfft2::call(self, s, dim, norm);
23108   }
23109   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
23110   auto results = batch_rule(self_value, self_bdim, s, dim, norm);
23111   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
23112 }
23113 template <typename batch_rule_t, batch_rule_t batch_rule>
23114 at::Tensor fft_fftn_generated_plumbing(const at::Tensor & self, at::OptionalSymIntArrayRef s, at::OptionalIntArrayRef dim, ::std::optional<c10::string_view> norm) {
23115   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
23116   auto maybe_layer = maybeCurrentDynamicLayer();
23117   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
23118   int64_t cur_level = maybe_layer->layerId();
23119   if (!isBatchedAtLevel(self, cur_level)) {
23120     return at::_ops::fft_fftn::call(self, s, dim, norm);
23121   }
23122   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
23123   auto results = batch_rule(self_value, self_bdim, s, dim, norm);
23124   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
23125 }
23126 template <typename batch_rule_t, batch_rule_t batch_rule>
23127 at::Tensor fft_ifftn_generated_plumbing(const at::Tensor & self, at::OptionalSymIntArrayRef s, at::OptionalIntArrayRef dim, ::std::optional<c10::string_view> norm) {
23128   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
23129   auto maybe_layer = maybeCurrentDynamicLayer();
23130   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
23131   int64_t cur_level = maybe_layer->layerId();
23132   if (!isBatchedAtLevel(self, cur_level)) {
23133     return at::_ops::fft_ifftn::call(self, s, dim, norm);
23134   }
23135   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
23136   auto results = batch_rule(self_value, self_bdim, s, dim, norm);
23137   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
23138 }
23139 template <typename batch_rule_t, batch_rule_t batch_rule>
23140 at::Tensor fft_rfftn_generated_plumbing(const at::Tensor & self, at::OptionalSymIntArrayRef s, at::OptionalIntArrayRef dim, ::std::optional<c10::string_view> norm) {
23141   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
23142   auto maybe_layer = maybeCurrentDynamicLayer();
23143   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
23144   int64_t cur_level = maybe_layer->layerId();
23145   if (!isBatchedAtLevel(self, cur_level)) {
23146     return at::_ops::fft_rfftn::call(self, s, dim, norm);
23147   }
23148   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
23149   auto results = batch_rule(self_value, self_bdim, s, dim, norm);
23150   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
23151 }
23152 template <typename batch_rule_t, batch_rule_t batch_rule>
23153 at::Tensor fft_irfftn_generated_plumbing(const at::Tensor & self, at::OptionalSymIntArrayRef s, at::OptionalIntArrayRef dim, ::std::optional<c10::string_view> norm) {
23154   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
23155   auto maybe_layer = maybeCurrentDynamicLayer();
23156   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
23157   int64_t cur_level = maybe_layer->layerId();
23158   if (!isBatchedAtLevel(self, cur_level)) {
23159     return at::_ops::fft_irfftn::call(self, s, dim, norm);
23160   }
23161   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
23162   auto results = batch_rule(self_value, self_bdim, s, dim, norm);
23163   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
23164 }
23165 template <typename batch_rule_t, batch_rule_t batch_rule>
23166 at::Tensor fft_hfftn_generated_plumbing(const at::Tensor & self, at::OptionalSymIntArrayRef s, at::OptionalIntArrayRef dim, ::std::optional<c10::string_view> norm) {
23167   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
23168   auto maybe_layer = maybeCurrentDynamicLayer();
23169   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
23170   int64_t cur_level = maybe_layer->layerId();
23171   if (!isBatchedAtLevel(self, cur_level)) {
23172     return at::_ops::fft_hfftn::call(self, s, dim, norm);
23173   }
23174   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
23175   auto results = batch_rule(self_value, self_bdim, s, dim, norm);
23176   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
23177 }
23178 template <typename batch_rule_t, batch_rule_t batch_rule>
23179 at::Tensor fft_ihfftn_generated_plumbing(const at::Tensor & self, at::OptionalSymIntArrayRef s, at::OptionalIntArrayRef dim, ::std::optional<c10::string_view> norm) {
23180   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
23181   auto maybe_layer = maybeCurrentDynamicLayer();
23182   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
23183   int64_t cur_level = maybe_layer->layerId();
23184   if (!isBatchedAtLevel(self, cur_level)) {
23185     return at::_ops::fft_ihfftn::call(self, s, dim, norm);
23186   }
23187   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
23188   auto results = batch_rule(self_value, self_bdim, s, dim, norm);
23189   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
23190 }
23191 template <typename batch_rule_t, batch_rule_t batch_rule>
23192 at::Tensor fft_fftshift_generated_plumbing(const at::Tensor & self, at::OptionalIntArrayRef dim) {
23193   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
23194   auto maybe_layer = maybeCurrentDynamicLayer();
23195   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
23196   int64_t cur_level = maybe_layer->layerId();
23197   if (!isBatchedAtLevel(self, cur_level)) {
23198     return at::_ops::fft_fftshift::call(self, dim);
23199   }
23200   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
23201   auto results = batch_rule(self_value, self_bdim, dim);
23202   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
23203 }
23204 template <typename batch_rule_t, batch_rule_t batch_rule>
23205 at::Tensor fft_ifftshift_generated_plumbing(const at::Tensor & self, at::OptionalIntArrayRef dim) {
23206   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
23207   auto maybe_layer = maybeCurrentDynamicLayer();
23208   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
23209   int64_t cur_level = maybe_layer->layerId();
23210   if (!isBatchedAtLevel(self, cur_level)) {
23211     return at::_ops::fft_ifftshift::call(self, dim);
23212   }
23213   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
23214   auto results = batch_rule(self_value, self_bdim, dim);
23215   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
23216 }
23217 template <typename batch_rule_t, batch_rule_t batch_rule>
23218 ::std::tuple<at::Tensor,at::Tensor> linalg_cholesky_ex_generated_plumbing(const at::Tensor & self, bool upper, bool check_errors) {
23219   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
23220   auto maybe_layer = maybeCurrentDynamicLayer();
23221   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
23222   int64_t cur_level = maybe_layer->layerId();
23223   if (!isBatchedAtLevel(self, cur_level)) {
23224     return at::_ops::linalg_cholesky_ex::call(self, upper, check_errors);
23225   }
23226   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
23227   auto results = batch_rule(self_value, self_bdim, upper, check_errors);
23228   return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
23229 }
23230 template <typename batch_rule_t, batch_rule_t batch_rule>
23231 at::Tensor linalg_cholesky_generated_plumbing(const at::Tensor & self, bool upper) {
23232   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
23233   auto maybe_layer = maybeCurrentDynamicLayer();
23234   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
23235   int64_t cur_level = maybe_layer->layerId();
23236   if (!isBatchedAtLevel(self, cur_level)) {
23237     return at::_ops::linalg_cholesky::call(self, upper);
23238   }
23239   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
23240   auto results = batch_rule(self_value, self_bdim, upper);
23241   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
23242 }
23243 template <typename batch_rule_t, batch_rule_t batch_rule>
23244 at::Tensor linalg_cross_generated_plumbing(const at::Tensor & self, const at::Tensor & other, int64_t dim) {
23245   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
23246   auto maybe_layer = maybeCurrentDynamicLayer();
23247   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
23248   int64_t cur_level = maybe_layer->layerId();
23249   if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
23250     return at::_ops::linalg_cross::call(self, other, dim);
23251   }
23252   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
23253   auto [other_value, other_bdim] = unwrapTensorAtLevel(other, cur_level);
23254   auto results = batch_rule(self_value, self_bdim, other_value, other_bdim, dim);
23255   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
23256 }
23257 template <typename batch_rule_t, batch_rule_t batch_rule>
23258 ::std::tuple<at::Tensor,at::Tensor> linalg_lu_factor_generated_plumbing(const at::Tensor & A, bool pivot) {
23259   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
23260   auto maybe_layer = maybeCurrentDynamicLayer();
23261   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
23262   int64_t cur_level = maybe_layer->layerId();
23263   if (!isBatchedAtLevel(A, cur_level)) {
23264     return at::_ops::linalg_lu_factor::call(A, pivot);
23265   }
23266   auto [A_value, A_bdim] = unwrapTensorAtLevel(A, cur_level);
23267   auto results = batch_rule(A_value, A_bdim, pivot);
23268   return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
23269 }
23270 template <typename batch_rule_t, batch_rule_t batch_rule>
23271 ::std::tuple<at::Tensor,at::Tensor,at::Tensor> linalg_lu_factor_ex_generated_plumbing(const at::Tensor & A, bool pivot, bool check_errors) {
23272   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
23273   auto maybe_layer = maybeCurrentDynamicLayer();
23274   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
23275   int64_t cur_level = maybe_layer->layerId();
23276   if (!isBatchedAtLevel(A, cur_level)) {
23277     return at::_ops::linalg_lu_factor_ex::call(A, pivot, check_errors);
23278   }
23279   auto [A_value, A_bdim] = unwrapTensorAtLevel(A, cur_level);
23280   auto results = batch_rule(A_value, A_bdim, pivot, check_errors);
23281   return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level));
23282 }
23283 template <typename batch_rule_t, batch_rule_t batch_rule>
23284 ::std::tuple<at::Tensor,at::Tensor,at::Tensor> linalg_lu_generated_plumbing(const at::Tensor & A, bool pivot) {
23285   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
23286   auto maybe_layer = maybeCurrentDynamicLayer();
23287   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
23288   int64_t cur_level = maybe_layer->layerId();
23289   if (!isBatchedAtLevel(A, cur_level)) {
23290     return at::_ops::linalg_lu::call(A, pivot);
23291   }
23292   auto [A_value, A_bdim] = unwrapTensorAtLevel(A, cur_level);
23293   auto results = batch_rule(A_value, A_bdim, pivot);
23294   return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level));
23295 }
23296 template <typename batch_rule_t, batch_rule_t batch_rule>
23297 at::Tensor linalg_lu_solve_generated_plumbing(const at::Tensor & LU, const at::Tensor & pivots, const at::Tensor & B, bool left, bool adjoint) {
23298   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
23299   auto maybe_layer = maybeCurrentDynamicLayer();
23300   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
23301   int64_t cur_level = maybe_layer->layerId();
23302   if (!isBatchedAtLevel(LU, cur_level) && !isBatchedAtLevel(pivots, cur_level) && !isBatchedAtLevel(B, cur_level)) {
23303     return at::_ops::linalg_lu_solve::call(LU, pivots, B, left, adjoint);
23304   }
23305   auto [LU_value, LU_bdim] = unwrapTensorAtLevel(LU, cur_level);
23306   auto [pivots_value, pivots_bdim] = unwrapTensorAtLevel(pivots, cur_level);
23307   auto [B_value, B_bdim] = unwrapTensorAtLevel(B, cur_level);
23308   auto results = batch_rule(LU_value, LU_bdim, pivots_value, pivots_bdim, B_value, B_bdim, left, adjoint);
23309   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
23310 }
23311 template <typename batch_rule_t, batch_rule_t batch_rule>
23312 ::std::tuple<at::Tensor,at::Tensor,at::Tensor> _linalg_det_generated_plumbing(const at::Tensor & A) {
23313   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
23314   auto maybe_layer = maybeCurrentDynamicLayer();
23315   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
23316   int64_t cur_level = maybe_layer->layerId();
23317   if (!isBatchedAtLevel(A, cur_level)) {
23318     return at::_ops::_linalg_det::call(A);
23319   }
23320   auto [A_value, A_bdim] = unwrapTensorAtLevel(A, cur_level);
23321   auto results = batch_rule(A_value, A_bdim);
23322   return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level));
23323 }
23324 template <typename batch_rule_t, batch_rule_t batch_rule>
23325 at::Tensor linalg_det_generated_plumbing(const at::Tensor & A) {
23326   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
23327   auto maybe_layer = maybeCurrentDynamicLayer();
23328   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
23329   int64_t cur_level = maybe_layer->layerId();
23330   if (!isBatchedAtLevel(A, cur_level)) {
23331     return at::_ops::linalg_det::call(A);
23332   }
23333   auto [A_value, A_bdim] = unwrapTensorAtLevel(A, cur_level);
23334   auto results = batch_rule(A_value, A_bdim);
23335   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
23336 }
23337 template <typename batch_rule_t, batch_rule_t batch_rule>
23338 at::Tensor det_generated_plumbing(const at::Tensor & self) {
23339   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
23340   auto maybe_layer = maybeCurrentDynamicLayer();
23341   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
23342   int64_t cur_level = maybe_layer->layerId();
23343   if (!isBatchedAtLevel(self, cur_level)) {
23344     return at::_ops::det::call(self);
23345   }
23346   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
23347   auto results = batch_rule(self_value, self_bdim);
23348   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
23349 }
23350 template <typename batch_rule_t, batch_rule_t batch_rule>
23351 ::std::tuple<at::Tensor,at::Tensor,at::Tensor> linalg_ldl_factor_ex_generated_plumbing(const at::Tensor & self, bool hermitian, bool check_errors) {
23352   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
23353   auto maybe_layer = maybeCurrentDynamicLayer();
23354   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
23355   int64_t cur_level = maybe_layer->layerId();
23356   if (!isBatchedAtLevel(self, cur_level)) {
23357     return at::_ops::linalg_ldl_factor_ex::call(self, hermitian, check_errors);
23358   }
23359   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
23360   auto results = batch_rule(self_value, self_bdim, hermitian, check_errors);
23361   return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level));
23362 }
23363 template <typename batch_rule_t, batch_rule_t batch_rule>
23364 ::std::tuple<at::Tensor,at::Tensor> linalg_ldl_factor_generated_plumbing(const at::Tensor & self, bool hermitian) {
23365   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
23366   auto maybe_layer = maybeCurrentDynamicLayer();
23367   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
23368   int64_t cur_level = maybe_layer->layerId();
23369   if (!isBatchedAtLevel(self, cur_level)) {
23370     return at::_ops::linalg_ldl_factor::call(self, hermitian);
23371   }
23372   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
23373   auto results = batch_rule(self_value, self_bdim, hermitian);
23374   return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
23375 }
23376 template <typename batch_rule_t, batch_rule_t batch_rule>
23377 at::Tensor linalg_ldl_solve_generated_plumbing(const at::Tensor & LD, const at::Tensor & pivots, const at::Tensor & B, bool hermitian) {
23378   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
23379   auto maybe_layer = maybeCurrentDynamicLayer();
23380   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
23381   int64_t cur_level = maybe_layer->layerId();
23382   if (!isBatchedAtLevel(LD, cur_level) && !isBatchedAtLevel(pivots, cur_level) && !isBatchedAtLevel(B, cur_level)) {
23383     return at::_ops::linalg_ldl_solve::call(LD, pivots, B, hermitian);
23384   }
23385   auto [LD_value, LD_bdim] = unwrapTensorAtLevel(LD, cur_level);
23386   auto [pivots_value, pivots_bdim] = unwrapTensorAtLevel(pivots, cur_level);
23387   auto [B_value, B_bdim] = unwrapTensorAtLevel(B, cur_level);
23388   auto results = batch_rule(LD_value, LD_bdim, pivots_value, pivots_bdim, B_value, B_bdim, hermitian);
23389   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
23390 }
23391 template <typename batch_rule_t, batch_rule_t batch_rule>
23392 ::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor> linalg_lstsq_generated_plumbing(const at::Tensor & self, const at::Tensor & b, ::std::optional<double> rcond, ::std::optional<c10::string_view> driver) {
23393   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
23394   auto maybe_layer = maybeCurrentDynamicLayer();
23395   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
23396   int64_t cur_level = maybe_layer->layerId();
23397   if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(b, cur_level)) {
23398     return at::_ops::linalg_lstsq::call(self, b, rcond, driver);
23399   }
23400   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
23401   auto [b_value, b_bdim] = unwrapTensorAtLevel(b, cur_level);
23402   auto results = batch_rule(self_value, self_bdim, b_value, b_bdim, rcond, driver);
23403   return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level), makeBatched(std::get<6>(results), std::get<7>(results), cur_level));
23404 }
23405 template <typename batch_rule_t, batch_rule_t batch_rule>
23406 at::Tensor linalg_matmul_generated_plumbing(const at::Tensor & self, const at::Tensor & other) {
23407   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
23408   auto maybe_layer = maybeCurrentDynamicLayer();
23409   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
23410   int64_t cur_level = maybe_layer->layerId();
23411   if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
23412     return at::_ops::linalg_matmul::call(self, other);
23413   }
23414   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
23415   auto [other_value, other_bdim] = unwrapTensorAtLevel(other, cur_level);
23416   auto results = batch_rule(self_value, self_bdim, other_value, other_bdim);
23417   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
23418 }
23419 template <typename batch_rule_t, batch_rule_t batch_rule>
23420 at::Tensor linalg_vecdot_generated_plumbing(const at::Tensor & x, const at::Tensor & y, int64_t dim) {
23421   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
23422   auto maybe_layer = maybeCurrentDynamicLayer();
23423   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
23424   int64_t cur_level = maybe_layer->layerId();
23425   if (!isBatchedAtLevel(x, cur_level) && !isBatchedAtLevel(y, cur_level)) {
23426     return at::_ops::linalg_vecdot::call(x, y, dim);
23427   }
23428   auto [x_value, x_bdim] = unwrapTensorAtLevel(x, cur_level);
23429   auto [y_value, y_bdim] = unwrapTensorAtLevel(y, cur_level);
23430   auto results = batch_rule(x_value, x_bdim, y_value, y_bdim, dim);
23431   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
23432 }
// Generated vmap plumbing for aten::linalg_matrix_exp. Bypasses to the plain
// op when `self` is not batched at the current vmap level; otherwise unwraps
// (value, bdim), applies the batch rule, and re-wraps the single result.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor linalg_matrix_exp_generated_plumbing(const at::Tensor & self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::linalg_matrix_exp::call(self);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// Generated vmap plumbing for aten::_linalg_slogdet. Falls through to the
// plain op when `A` is not batched at the current vmap level; otherwise runs
// the batch rule and re-wraps all four (value, bdim) output pairs.
template <typename batch_rule_t, batch_rule_t batch_rule>
::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor> _linalg_slogdet_generated_plumbing(const at::Tensor & A) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(A, cur_level)) {
    return at::_ops::_linalg_slogdet::call(A);
  }
  auto [A_value, A_bdim] = unwrapTensorAtLevel(A, cur_level);
  auto results = batch_rule(A_value, A_bdim);
  return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level), makeBatched(std::get<6>(results), std::get<7>(results), cur_level));
}
// Generated vmap plumbing for aten::linalg_slogdet. Falls through to the
// plain op when `A` is not batched at the current vmap level; otherwise runs
// the batch rule and re-wraps both (value, bdim) output pairs.
template <typename batch_rule_t, batch_rule_t batch_rule>
::std::tuple<at::Tensor,at::Tensor> linalg_slogdet_generated_plumbing(const at::Tensor & A) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(A, cur_level)) {
    return at::_ops::linalg_slogdet::call(A);
  }
  auto [A_value, A_bdim] = unwrapTensorAtLevel(A, cur_level);
  auto results = batch_rule(A_value, A_bdim);
  return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
}
// Generated vmap plumbing for aten::slogdet. Falls through to the plain op
// when `self` is not batched at the current vmap level; otherwise runs the
// batch rule and re-wraps both (value, bdim) output pairs.
template <typename batch_rule_t, batch_rule_t batch_rule>
::std::tuple<at::Tensor,at::Tensor> slogdet_generated_plumbing(const at::Tensor & self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::slogdet::call(self);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim);
  return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
}
// Generated vmap plumbing for aten::logdet. Bypasses to the plain op when
// `self` is not batched at the current vmap level; otherwise unwraps
// (value, bdim), applies the batch rule, and re-wraps the single result.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor logdet_generated_plumbing(const at::Tensor & self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::logdet::call(self);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// Generated vmap plumbing for aten::linalg_eig. Falls through to the plain op
// when `self` is not batched at the current vmap level; otherwise runs the
// batch rule and re-wraps both (value, bdim) output pairs.
template <typename batch_rule_t, batch_rule_t batch_rule>
::std::tuple<at::Tensor,at::Tensor> linalg_eig_generated_plumbing(const at::Tensor & self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::linalg_eig::call(self);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim);
  return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
}
// Generated vmap plumbing for aten::_linalg_eigvals. Bypasses to the plain op
// when `self` is not batched at the current vmap level; otherwise unwraps
// (value, bdim), applies the batch rule, and re-wraps the single result.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor _linalg_eigvals_generated_plumbing(const at::Tensor & self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::_linalg_eigvals::call(self);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// Generated vmap plumbing for aten::linalg_eigvals. Bypasses to the plain op
// when `self` is not batched at the current vmap level; otherwise unwraps
// (value, bdim), applies the batch rule, and re-wraps the single result.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor linalg_eigvals_generated_plumbing(const at::Tensor & self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::linalg_eigvals::call(self);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// Generated vmap plumbing for aten::_linalg_eigh. Falls through to the plain
// op when `A` is not batched at the current vmap level; otherwise runs the
// batch rule (non-tensor args UPLO/compute_v pass through untouched) and
// re-wraps both (value, bdim) output pairs.
template <typename batch_rule_t, batch_rule_t batch_rule>
::std::tuple<at::Tensor,at::Tensor> _linalg_eigh_generated_plumbing(const at::Tensor & A, c10::string_view UPLO, bool compute_v) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(A, cur_level)) {
    return at::_ops::_linalg_eigh::call(A, UPLO, compute_v);
  }
  auto [A_value, A_bdim] = unwrapTensorAtLevel(A, cur_level);
  auto results = batch_rule(A_value, A_bdim, UPLO, compute_v);
  return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
}
// Generated vmap plumbing for aten::linalg_eigh. Falls through to the plain
// op when `self` is not batched at the current vmap level; otherwise runs the
// batch rule and re-wraps both (value, bdim) output pairs.
template <typename batch_rule_t, batch_rule_t batch_rule>
::std::tuple<at::Tensor,at::Tensor> linalg_eigh_generated_plumbing(const at::Tensor & self, c10::string_view UPLO) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::linalg_eigh::call(self, UPLO);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, UPLO);
  return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
}
// Generated vmap plumbing for aten::linalg_eigvalsh. Bypasses to the plain op
// when `self` is not batched at the current vmap level; otherwise unwraps
// (value, bdim), applies the batch rule, and re-wraps the single result.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor linalg_eigvalsh_generated_plumbing(const at::Tensor & self, c10::string_view UPLO) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::linalg_eigvalsh::call(self, UPLO);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, UPLO);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// Generated vmap plumbing for aten::linalg_householder_product. Falls through
// to the plain op when neither `input` nor `tau` is batched at the current
// vmap level; otherwise unwraps both, runs the batch rule, and re-wraps.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor linalg_householder_product_generated_plumbing(const at::Tensor & input, const at::Tensor & tau) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(tau, cur_level)) {
    return at::_ops::linalg_householder_product::call(input, tau);
  }
  auto [input_value, input_bdim] = unwrapTensorAtLevel(input, cur_level);
  auto [tau_value, tau_bdim] = unwrapTensorAtLevel(tau, cur_level);
  auto results = batch_rule(input_value, input_bdim, tau_value, tau_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// Generated vmap plumbing for aten::linalg_inv_ex. Falls through to the plain
// op when `A` is not batched at the current vmap level; otherwise runs the
// batch rule and re-wraps both (value, bdim) output pairs.
template <typename batch_rule_t, batch_rule_t batch_rule>
::std::tuple<at::Tensor,at::Tensor> linalg_inv_ex_generated_plumbing(const at::Tensor & A, bool check_errors) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(A, cur_level)) {
    return at::_ops::linalg_inv_ex::call(A, check_errors);
  }
  auto [A_value, A_bdim] = unwrapTensorAtLevel(A, cur_level);
  auto results = batch_rule(A_value, A_bdim, check_errors);
  return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
}
// Generated vmap plumbing for aten::linalg_inv. Bypasses to the plain op when
// `A` is not batched at the current vmap level; otherwise unwraps
// (value, bdim), applies the batch rule, and re-wraps the single result.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor linalg_inv_generated_plumbing(const at::Tensor & A) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(A, cur_level)) {
    return at::_ops::linalg_inv::call(A);
  }
  auto [A_value, A_bdim] = unwrapTensorAtLevel(A, cur_level);
  auto results = batch_rule(A_value, A_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// Generated vmap plumbing for aten::inverse. Bypasses to the plain op when
// `self` is not batched at the current vmap level; otherwise unwraps
// (value, bdim), applies the batch rule, and re-wraps the single result.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor inverse_generated_plumbing(const at::Tensor & self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::inverse::call(self);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// Generated vmap plumbing for aten::inner. Falls through to the plain op when
// neither `self` nor `other` is batched at the current vmap level; otherwise
// unwraps both inputs, runs the batch rule, and re-wraps the result.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor inner_generated_plumbing(const at::Tensor & self, const at::Tensor & other) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
    return at::_ops::inner::call(self, other);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto [other_value, other_bdim] = unwrapTensorAtLevel(other, cur_level);
  auto results = batch_rule(self_value, self_bdim, other_value, other_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// Generated vmap plumbing for aten::outer. Falls through to the plain op when
// neither `self` nor `vec2` is batched at the current vmap level; otherwise
// unwraps both inputs, runs the batch rule, and re-wraps the result.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor outer_generated_plumbing(const at::Tensor & self, const at::Tensor & vec2) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(vec2, cur_level)) {
    return at::_ops::outer::call(self, vec2);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto [vec2_value, vec2_bdim] = unwrapTensorAtLevel(vec2, cur_level);
  auto results = batch_rule(self_value, self_bdim, vec2_value, vec2_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// Generated vmap plumbing for aten::ger. Falls through to the plain op when
// neither `self` nor `vec2` is batched at the current vmap level; otherwise
// unwraps both inputs, runs the batch rule, and re-wraps the result.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor ger_generated_plumbing(const at::Tensor & self, const at::Tensor & vec2) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(vec2, cur_level)) {
    return at::_ops::ger::call(self, vec2);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto [vec2_value, vec2_bdim] = unwrapTensorAtLevel(vec2, cur_level);
  auto results = batch_rule(self_value, self_bdim, vec2_value, vec2_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// Generated vmap plumbing for aten::linalg_norm. Bypasses to the plain op
// when `self` is not batched at the current vmap level; otherwise unwraps
// (value, bdim), forwards the non-tensor args to the batch rule unchanged,
// and re-wraps the single result.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor linalg_norm_generated_plumbing(const at::Tensor & self, const ::std::optional<at::Scalar> & ord, at::OptionalIntArrayRef dim, bool keepdim, ::std::optional<at::ScalarType> dtype) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::linalg_norm::call(self, ord, dim, keepdim, dtype);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, ord, dim, keepdim, dtype);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// Generated vmap plumbing for aten::linalg_norm.ord_str (string-order
// overload). Bypasses to the plain op when `self` is not batched at the
// current vmap level; otherwise unwraps, runs the batch rule, and re-wraps.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor linalg_norm_ord_str_generated_plumbing(const at::Tensor & self, c10::string_view ord, at::OptionalIntArrayRef dim, bool keepdim, ::std::optional<at::ScalarType> dtype) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::linalg_norm_ord_str::call(self, ord, dim, keepdim, dtype);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, ord, dim, keepdim, dtype);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// Generated vmap plumbing for aten::linalg_vector_norm. Bypasses to the plain
// op when `self` is not batched at the current vmap level; otherwise unwraps
// (value, bdim), applies the batch rule, and re-wraps the single result.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor linalg_vector_norm_generated_plumbing(const at::Tensor & self, const at::Scalar & ord, at::OptionalIntArrayRef dim, bool keepdim, ::std::optional<at::ScalarType> dtype) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::linalg_vector_norm::call(self, ord, dim, keepdim, dtype);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, ord, dim, keepdim, dtype);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// Generated vmap plumbing for aten::linalg_matrix_norm. Bypasses to the plain
// op when `self` is not batched at the current vmap level; otherwise unwraps
// (value, bdim), applies the batch rule, and re-wraps the single result.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor linalg_matrix_norm_generated_plumbing(const at::Tensor & self, const at::Scalar & ord, at::IntArrayRef dim, bool keepdim, ::std::optional<at::ScalarType> dtype) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::linalg_matrix_norm::call(self, ord, dim, keepdim, dtype);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, ord, dim, keepdim, dtype);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// Generated vmap plumbing for aten::linalg_matrix_norm.str_ord (string-order
// overload). Bypasses to the plain op when `self` is not batched at the
// current vmap level; otherwise unwraps, runs the batch rule, and re-wraps.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor linalg_matrix_norm_str_ord_generated_plumbing(const at::Tensor & self, c10::string_view ord, at::IntArrayRef dim, bool keepdim, ::std::optional<at::ScalarType> dtype) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::linalg_matrix_norm_str_ord::call(self, ord, dim, keepdim, dtype);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, ord, dim, keepdim, dtype);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// Generated vmap plumbing for aten::_linalg_svd. Falls through to the plain
// op when `A` is not batched at the current vmap level; otherwise runs the
// batch rule and re-wraps all three (value, bdim) output pairs.
template <typename batch_rule_t, batch_rule_t batch_rule>
::std::tuple<at::Tensor,at::Tensor,at::Tensor> _linalg_svd_generated_plumbing(const at::Tensor & A, bool full_matrices, bool compute_uv, ::std::optional<c10::string_view> driver) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(A, cur_level)) {
    return at::_ops::_linalg_svd::call(A, full_matrices, compute_uv, driver);
  }
  auto [A_value, A_bdim] = unwrapTensorAtLevel(A, cur_level);
  auto results = batch_rule(A_value, A_bdim, full_matrices, compute_uv, driver);
  return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level));
}
// Generated vmap plumbing for aten::linalg_svd. Falls through to the plain op
// when `A` is not batched at the current vmap level; otherwise runs the batch
// rule and re-wraps all three (value, bdim) output pairs (U, S, Vh per the
// op's schema — confirm against native_functions.yaml).
template <typename batch_rule_t, batch_rule_t batch_rule>
::std::tuple<at::Tensor,at::Tensor,at::Tensor> linalg_svd_generated_plumbing(const at::Tensor & A, bool full_matrices, ::std::optional<c10::string_view> driver) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(A, cur_level)) {
    return at::_ops::linalg_svd::call(A, full_matrices, driver);
  }
  auto [A_value, A_bdim] = unwrapTensorAtLevel(A, cur_level);
  auto results = batch_rule(A_value, A_bdim, full_matrices, driver);
  return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level));
}
// Generated vmap plumbing for aten::linalg_svdvals. Bypasses to the plain op
// when `A` is not batched at the current vmap level; otherwise unwraps
// (value, bdim), applies the batch rule, and re-wraps the single result.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor linalg_svdvals_generated_plumbing(const at::Tensor & A, ::std::optional<c10::string_view> driver) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(A, cur_level)) {
    return at::_ops::linalg_svdvals::call(A, driver);
  }
  auto [A_value, A_bdim] = unwrapTensorAtLevel(A, cur_level);
  auto results = batch_rule(A_value, A_bdim, driver);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// Generated vmap plumbing for aten::linalg_cond. Bypasses to the plain op
// when `self` is not batched at the current vmap level; otherwise unwraps
// (value, bdim), applies the batch rule, and re-wraps the single result.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor linalg_cond_generated_plumbing(const at::Tensor & self, const ::std::optional<at::Scalar> & p) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::linalg_cond::call(self, p);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, p);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// Generated vmap plumbing for aten::linalg_cond.p_str (string-norm overload).
// Bypasses to the plain op when `self` is not batched at the current vmap
// level; otherwise unwraps, runs the batch rule, and re-wraps the result.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor linalg_cond_p_str_generated_plumbing(const at::Tensor & self, c10::string_view p) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::linalg_cond_p_str::call(self, p);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, p);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// Generated vmap plumbing for aten::linalg_pinv.atol_rtol_tensor. The
// optional tensor args `atol`/`rtol` are unwrapped only when present; an
// absent optional is forwarded to the batch rule as empty (value, bdim).
// Falls through to the plain op when none of the inputs are batched at the
// current vmap level.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor linalg_pinv_atol_rtol_tensor_generated_plumbing(const at::Tensor & self, const ::std::optional<at::Tensor> & atol, const ::std::optional<at::Tensor> & rtol, bool hermitian) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(atol, cur_level) && !isBatchedAtLevel(rtol, cur_level)) {
    return at::_ops::linalg_pinv_atol_rtol_tensor::call(self, atol, rtol, hermitian);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  // Unwrap the optional tensors only if they hold a value; otherwise the
  // empty optionals below are passed through to the batch rule.
  std::optional<Tensor> atol_value;
  std::optional<int64_t> atol_bdim;
  if (atol) {
      std::tie(atol_value, atol_bdim) = unwrapTensorAtLevel(atol.value(), cur_level);
  }
  std::optional<Tensor> rtol_value;
  std::optional<int64_t> rtol_bdim;
  if (rtol) {
      std::tie(rtol_value, rtol_bdim) = unwrapTensorAtLevel(rtol.value(), cur_level);
  }
  auto results = batch_rule(self_value, self_bdim, atol_value, atol_bdim, rtol_value, rtol_bdim, hermitian);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// Generated vmap plumbing for aten::linalg_pinv.atol_rtol_float. Bypasses to
// the plain op when `self` is not batched at the current vmap level; the
// scalar atol/rtol options pass through to the batch rule unchanged.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor linalg_pinv_atol_rtol_float_generated_plumbing(const at::Tensor & self, ::std::optional<double> atol, ::std::optional<double> rtol, bool hermitian) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::linalg_pinv_atol_rtol_float::call(self, atol, rtol, hermitian);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, atol, rtol, hermitian);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// Generated vmap plumbing for aten::linalg_pinv (rcond-as-double overload).
// Bypasses to the plain op when `self` is not batched at the current vmap
// level; otherwise unwraps, runs the batch rule, and re-wraps the result.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor linalg_pinv_generated_plumbing(const at::Tensor & self, double rcond, bool hermitian) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::linalg_pinv::call(self, rcond, hermitian);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, rcond, hermitian);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// Generated vmap plumbing for aten::linalg_pinv.rcond_tensor. Falls through
// to the plain op when neither `self` nor `rcond` is batched at the current
// vmap level; otherwise unwraps both tensors, runs the batch rule, re-wraps.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor linalg_pinv_rcond_tensor_generated_plumbing(const at::Tensor & self, const at::Tensor & rcond, bool hermitian) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(rcond, cur_level)) {
    return at::_ops::linalg_pinv_rcond_tensor::call(self, rcond, hermitian);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto [rcond_value, rcond_bdim] = unwrapTensorAtLevel(rcond, cur_level);
  auto results = batch_rule(self_value, self_bdim, rcond_value, rcond_bdim, hermitian);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// Generated vmap plumbing for aten::_linalg_solve_ex. Falls through to the
// plain op when neither `A` nor `B` is batched at the current vmap level;
// otherwise unwraps both, runs the batch rule, and re-wraps all four
// (value, bdim) output pairs.
template <typename batch_rule_t, batch_rule_t batch_rule>
::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor> _linalg_solve_ex_generated_plumbing(const at::Tensor & A, const at::Tensor & B, bool left, bool check_errors) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(A, cur_level) && !isBatchedAtLevel(B, cur_level)) {
    return at::_ops::_linalg_solve_ex::call(A, B, left, check_errors);
  }
  auto [A_value, A_bdim] = unwrapTensorAtLevel(A, cur_level);
  auto [B_value, B_bdim] = unwrapTensorAtLevel(B, cur_level);
  auto results = batch_rule(A_value, A_bdim, B_value, B_bdim, left, check_errors);
  return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level), makeBatched(std::get<6>(results), std::get<7>(results), cur_level));
}
// Generated vmap plumbing for aten::linalg_solve_ex. Falls through to the
// plain op when neither `A` nor `B` is batched at the current vmap level;
// otherwise unwraps both, runs the batch rule, and re-wraps both outputs.
template <typename batch_rule_t, batch_rule_t batch_rule>
::std::tuple<at::Tensor,at::Tensor> linalg_solve_ex_generated_plumbing(const at::Tensor & A, const at::Tensor & B, bool left, bool check_errors) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(A, cur_level) && !isBatchedAtLevel(B, cur_level)) {
    return at::_ops::linalg_solve_ex::call(A, B, left, check_errors);
  }
  auto [A_value, A_bdim] = unwrapTensorAtLevel(A, cur_level);
  auto [B_value, B_bdim] = unwrapTensorAtLevel(B, cur_level);
  auto results = batch_rule(A_value, A_bdim, B_value, B_bdim, left, check_errors);
  return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
}
// Generated vmap plumbing for aten::linalg_solve. Falls through to the plain
// op when neither `A` nor `B` is batched at the current vmap level; otherwise
// unwraps both, runs the batch rule, and re-wraps the single result.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor linalg_solve_generated_plumbing(const at::Tensor & A, const at::Tensor & B, bool left) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(A, cur_level) && !isBatchedAtLevel(B, cur_level)) {
    return at::_ops::linalg_solve::call(A, B, left);
  }
  auto [A_value, A_bdim] = unwrapTensorAtLevel(A, cur_level);
  auto [B_value, B_bdim] = unwrapTensorAtLevel(B, cur_level);
  auto results = batch_rule(A_value, A_bdim, B_value, B_bdim, left);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// Generated vmap plumbing for aten::_spsolve. Falls through to the plain op
// when neither `A` nor `B` is batched at the current vmap level; otherwise
// unwraps both, runs the batch rule, and re-wraps the single result.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor _spsolve_generated_plumbing(const at::Tensor & A, const at::Tensor & B, bool left) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(A, cur_level) && !isBatchedAtLevel(B, cur_level)) {
    return at::_ops::_spsolve::call(A, B, left);
  }
  auto [A_value, A_bdim] = unwrapTensorAtLevel(A, cur_level);
  auto [B_value, B_bdim] = unwrapTensorAtLevel(B, cur_level);
  auto results = batch_rule(A_value, A_bdim, B_value, B_bdim, left);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// Generated vmap plumbing for aten::linalg_tensorinv. Bypasses to the plain
// op when `self` is not batched at the current vmap level; otherwise unwraps
// (value, bdim), applies the batch rule, and re-wraps the single result.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor linalg_tensorinv_generated_plumbing(const at::Tensor & self, int64_t ind) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::linalg_tensorinv::call(self, ind);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, ind);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// Generated vmap plumbing for aten::linalg_tensorsolve.
// Falls through to the unbatched op when neither tensor argument is batched
// at the current level; otherwise unwraps both, applies `batch_rule`, and
// re-wraps the (tensor, bdim) result.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor linalg_tensorsolve_generated_plumbing(const at::Tensor & self, const at::Tensor & other, at::OptionalIntArrayRef dims) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
    return at::_ops::linalg_tensorsolve::call(self, other, dims);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto [other_value, other_bdim] = unwrapTensorAtLevel(other, cur_level);
  auto results = batch_rule(self_value, self_bdim, other_value, other_bdim, dims);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// Generated vmap plumbing for aten::linalg_qr.
// Returns a pair of tensors; each element of the batch_rule result is
// re-wrapped separately: results are laid out as (tensor0, bdim0, tensor1, bdim1).
template <typename batch_rule_t, batch_rule_t batch_rule>
::std::tuple<at::Tensor,at::Tensor> linalg_qr_generated_plumbing(const at::Tensor & A, c10::string_view mode) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(A, cur_level)) {
    return at::_ops::linalg_qr::call(A, mode);
  }
  auto [A_value, A_bdim] = unwrapTensorAtLevel(A, cur_level);
  auto results = batch_rule(A_value, A_bdim, mode);
  return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
}
// Generated vmap plumbing for aten::linalg_matrix_power.
// Unbatched fast path when `self` is not batched at the current level;
// otherwise unwrap, run `batch_rule`, and re-wrap.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor linalg_matrix_power_generated_plumbing(const at::Tensor & self, int64_t n) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::linalg_matrix_power::call(self, n);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, n);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// Generated vmap plumbing for aten::linalg_matrix_rank.atol_rtol_tensor.
// Optional tensor arguments (atol, rtol) are unwrapped only when present;
// absent optionals are forwarded to `batch_rule` as empty (value, bdim) pairs.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor linalg_matrix_rank_atol_rtol_tensor_generated_plumbing(const at::Tensor & input, const ::std::optional<at::Tensor> & atol, const ::std::optional<at::Tensor> & rtol, bool hermitian) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(atol, cur_level) && !isBatchedAtLevel(rtol, cur_level)) {
    return at::_ops::linalg_matrix_rank_atol_rtol_tensor::call(input, atol, rtol, hermitian);
  }
  auto [input_value, input_bdim] = unwrapTensorAtLevel(input, cur_level);
  std::optional<Tensor> atol_value;
  std::optional<int64_t> atol_bdim;
  if (atol) {
      std::tie(atol_value, atol_bdim) = unwrapTensorAtLevel(atol.value(), cur_level);
  }
  std::optional<Tensor> rtol_value;
  std::optional<int64_t> rtol_bdim;
  if (rtol) {
      std::tie(rtol_value, rtol_bdim) = unwrapTensorAtLevel(rtol.value(), cur_level);
  }
  auto results = batch_rule(input_value, input_bdim, atol_value, atol_bdim, rtol_value, rtol_bdim, hermitian);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// Generated vmap plumbing for aten::linalg_matrix_rank.atol_rtol_float.
// Only `self` can be batched; the scalar tolerances pass through untouched.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor linalg_matrix_rank_atol_rtol_float_generated_plumbing(const at::Tensor & self, ::std::optional<double> atol, ::std::optional<double> rtol, bool hermitian) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::linalg_matrix_rank_atol_rtol_float::call(self, atol, rtol, hermitian);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, atol, rtol, hermitian);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// Generated vmap plumbing for aten::linalg_matrix_rank (double-tol overload).
// Unbatched fast path when `self` is not batched at the current level;
// otherwise unwrap, run `batch_rule`, and re-wrap.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor linalg_matrix_rank_generated_plumbing(const at::Tensor & self, double tol, bool hermitian) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::linalg_matrix_rank::call(self, tol, hermitian);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, tol, hermitian);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// Generated vmap plumbing for aten::linalg_matrix_rank.tol_tensor.
// Both `input` and the tensor tolerance `tol` participate in batching:
// either being batched at the current level triggers the batch_rule path.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor linalg_matrix_rank_tol_tensor_generated_plumbing(const at::Tensor & input, const at::Tensor & tol, bool hermitian) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(tol, cur_level)) {
    return at::_ops::linalg_matrix_rank_tol_tensor::call(input, tol, hermitian);
  }
  auto [input_value, input_bdim] = unwrapTensorAtLevel(input, cur_level);
  auto [tol_value, tol_bdim] = unwrapTensorAtLevel(tol, cur_level);
  auto results = batch_rule(input_value, input_bdim, tol_value, tol_bdim, hermitian);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// Generated vmap plumbing for aten::linalg_multi_dot.
// TensorList arguments are not unwrapped here; the list is passed straight
// to `batch_rule`, which handles per-element unwrapping itself.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor linalg_multi_dot_generated_plumbing(at::TensorList tensors) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(tensors, cur_level)) {
    return at::_ops::linalg_multi_dot::call(tensors);
  }

  auto results = batch_rule(tensors);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// Generated vmap plumbing for aten::nested_to_padded_tensor.
// Unbatched fast path when `self` is not batched at the current level;
// otherwise unwrap, run `batch_rule`, and re-wrap.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor nested_to_padded_tensor_generated_plumbing(const at::Tensor & self, double padding, at::OptionalIntArrayRef output_size) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::nested_to_padded_tensor::call(self, padding, output_size);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, padding, output_size);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// Generated vmap plumbing for aten::_test_serialization_subcmul (test op).
// Unbatched fast path when neither tensor is batched at the current level;
// otherwise unwrap both, run `batch_rule`, and re-wrap.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor _test_serialization_subcmul_generated_plumbing(const at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
    return at::_ops::_test_serialization_subcmul::call(self, other, alpha);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto [other_value, other_bdim] = unwrapTensorAtLevel(other, cur_level);
  auto results = batch_rule(self_value, self_bdim, other_value, other_bdim, alpha);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// Generated vmap plumbing for aten::_test_parallel_materialize (test op).
// Unbatched fast path when `self` is not batched at the current level;
// otherwise unwrap, run `batch_rule`, and re-wrap.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor _test_parallel_materialize_generated_plumbing(const at::Tensor & self, int64_t num_parallel, bool skip_first) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::_test_parallel_materialize::call(self, num_parallel, skip_first);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, num_parallel, skip_first);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// Generated vmap plumbing for aten::_test_optional_intlist (test op).
// Only `values` can be batched; `addends` passes through untouched.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor _test_optional_intlist_generated_plumbing(const at::Tensor & values, at::OptionalIntArrayRef addends) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(values, cur_level)) {
    return at::_ops::_test_optional_intlist::call(values, addends);
  }
  auto [values_value, values_bdim] = unwrapTensorAtLevel(values, cur_level);
  auto results = batch_rule(values_value, values_bdim, addends);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// Generated vmap plumbing for aten::_test_optional_filled_intlist (test op).
// Only `values` can be batched; `addends` passes through untouched.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor _test_optional_filled_intlist_generated_plumbing(const at::Tensor & values, at::OptionalIntArrayRef addends) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(values, cur_level)) {
    return at::_ops::_test_optional_filled_intlist::call(values, addends);
  }
  auto [values_value, values_bdim] = unwrapTensorAtLevel(values, cur_level);
  auto results = batch_rule(values_value, values_bdim, addends);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// Generated vmap plumbing for aten::_test_optional_floatlist (test op).
// Only `values` can be batched; `addends` passes through untouched.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor _test_optional_floatlist_generated_plumbing(const at::Tensor & values, ::std::optional<at::ArrayRef<double>> addends) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(values, cur_level)) {
    return at::_ops::_test_optional_floatlist::call(values, addends);
  }
  auto [values_value, values_bdim] = unwrapTensorAtLevel(values, cur_level);
  auto results = batch_rule(values_value, values_bdim, addends);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// Generated vmap plumbing for aten::_test_string_default (test op).
// Only `dummy` can be batched; the string arguments pass through untouched.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor _test_string_default_generated_plumbing(const at::Tensor & dummy, c10::string_view a, c10::string_view b) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(dummy, cur_level)) {
    return at::_ops::_test_string_default::call(dummy, a, b);
  }
  auto [dummy_value, dummy_bdim] = unwrapTensorAtLevel(dummy, cur_level);
  auto results = batch_rule(dummy_value, dummy_bdim, a, b);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// Generated vmap plumbing for aten::_test_ambiguous_defaults.a (test op).
// Only `dummy` can be batched; scalar args pass through untouched.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor _test_ambiguous_defaults_a_generated_plumbing(const at::Tensor & dummy, int64_t a, int64_t b) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(dummy, cur_level)) {
    return at::_ops::_test_ambiguous_defaults_a::call(dummy, a, b);
  }
  auto [dummy_value, dummy_bdim] = unwrapTensorAtLevel(dummy, cur_level);
  auto results = batch_rule(dummy_value, dummy_bdim, a, b);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// Generated vmap plumbing for aten::_test_ambiguous_defaults.b (test op).
// Only `dummy` can be batched; scalar/string args pass through untouched.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor _test_ambiguous_defaults_b_generated_plumbing(const at::Tensor & dummy, int64_t a, c10::string_view b) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(dummy, cur_level)) {
    return at::_ops::_test_ambiguous_defaults_b::call(dummy, a, b);
  }
  auto [dummy_value, dummy_bdim] = unwrapTensorAtLevel(dummy, cur_level);
  auto results = batch_rule(dummy_value, dummy_bdim, a, b);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// Generated vmap plumbing for aten::_test_warn_in_autograd (test op).
// Unbatched fast path when `self` is not batched at the current level;
// otherwise unwrap, run `batch_rule`, and re-wrap.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor _test_warn_in_autograd_generated_plumbing(const at::Tensor & self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::_test_warn_in_autograd::call(self);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// Generated vmap plumbing for aten::_test_autograd_multiple_dispatch.fullcoverage (test op).
// Unbatched fast path when `self` is not batched at the current level;
// otherwise unwrap, run `batch_rule`, and re-wrap.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor _test_autograd_multiple_dispatch_fullcoverage_generated_plumbing(const at::Tensor & self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::_test_autograd_multiple_dispatch_fullcoverage::call(self);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// Generated vmap plumbing for aten::_test_autograd_multiple_dispatch.ntonly (test op).
// Unbatched fast path when `self` is not batched at the current level;
// otherwise unwrap, run `batch_rule`, and re-wrap.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor _test_autograd_multiple_dispatch_ntonly_generated_plumbing(const at::Tensor & self, bool b) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::_test_autograd_multiple_dispatch_ntonly::call(self, b);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, b);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// Generated vmap plumbing for aten::_test_autograd_multiple_dispatch_view (test op).
// Unbatched fast path when `self` is not batched at the current level;
// otherwise unwrap, run `batch_rule`, and re-wrap.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor _test_autograd_multiple_dispatch_view_generated_plumbing(const at::Tensor & self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::_test_autograd_multiple_dispatch_view::call(self);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// Generated vmap plumbing for aten::_test_autograd_multiple_dispatch_view_copy (test op).
// Unbatched fast path when `self` is not batched at the current level;
// otherwise unwrap, run `batch_rule`, and re-wrap.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor _test_autograd_multiple_dispatch_view_copy_generated_plumbing(const at::Tensor & self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::_test_autograd_multiple_dispatch_view_copy::call(self);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// Generated vmap plumbing for aten::segment_reduce.
// Optional tensor arguments (lengths, indices, offsets) are unwrapped only
// when present; absent optionals are forwarded to `batch_rule` as empty
// (value, bdim) pairs.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor segment_reduce_generated_plumbing(const at::Tensor & data, c10::string_view reduce, const ::std::optional<at::Tensor> & lengths, const ::std::optional<at::Tensor> & indices, const ::std::optional<at::Tensor> & offsets, int64_t axis, bool unsafe, const ::std::optional<at::Scalar> & initial) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(data, cur_level) && !isBatchedAtLevel(lengths, cur_level) && !isBatchedAtLevel(indices, cur_level) && !isBatchedAtLevel(offsets, cur_level)) {
    return at::_ops::segment_reduce::call(data, reduce, lengths, indices, offsets, axis, unsafe, initial);
  }
  auto [data_value, data_bdim] = unwrapTensorAtLevel(data, cur_level);
  std::optional<Tensor> lengths_value;
  std::optional<int64_t> lengths_bdim;
  if (lengths) {
      std::tie(lengths_value, lengths_bdim) = unwrapTensorAtLevel(lengths.value(), cur_level);
  }
  std::optional<Tensor> indices_value;
  std::optional<int64_t> indices_bdim;
  if (indices) {
      std::tie(indices_value, indices_bdim) = unwrapTensorAtLevel(indices.value(), cur_level);
  }
  std::optional<Tensor> offsets_value;
  std::optional<int64_t> offsets_bdim;
  if (offsets) {
      std::tie(offsets_value, offsets_bdim) = unwrapTensorAtLevel(offsets.value(), cur_level);
  }
  auto results = batch_rule(data_value, data_bdim, reduce, lengths_value, lengths_bdim, indices_value, indices_bdim, offsets_value, offsets_bdim, axis, unsafe, initial);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// Generated vmap plumbing for aten::_segment_reduce_backward.
// Optional tensor arguments (lengths, offsets) are unwrapped only when
// present; absent optionals are forwarded as empty (value, bdim) pairs.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor _segment_reduce_backward_generated_plumbing(const at::Tensor & grad, const at::Tensor & output, const at::Tensor & data, c10::string_view reduce, const ::std::optional<at::Tensor> & lengths, const ::std::optional<at::Tensor> & offsets, int64_t axis, const ::std::optional<at::Scalar> & initial) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(grad, cur_level) && !isBatchedAtLevel(output, cur_level) && !isBatchedAtLevel(data, cur_level) && !isBatchedAtLevel(lengths, cur_level) && !isBatchedAtLevel(offsets, cur_level)) {
    return at::_ops::_segment_reduce_backward::call(grad, output, data, reduce, lengths, offsets, axis, initial);
  }
  auto [grad_value, grad_bdim] = unwrapTensorAtLevel(grad, cur_level);
  auto [output_value, output_bdim] = unwrapTensorAtLevel(output, cur_level);
  auto [data_value, data_bdim] = unwrapTensorAtLevel(data, cur_level);
  std::optional<Tensor> lengths_value;
  std::optional<int64_t> lengths_bdim;
  if (lengths) {
      std::tie(lengths_value, lengths_bdim) = unwrapTensorAtLevel(lengths.value(), cur_level);
  }
  std::optional<Tensor> offsets_value;
  std::optional<int64_t> offsets_bdim;
  if (offsets) {
      std::tie(offsets_value, offsets_bdim) = unwrapTensorAtLevel(offsets.value(), cur_level);
  }
  auto results = batch_rule(grad_value, grad_bdim, output_value, output_bdim, data_value, data_bdim, reduce, lengths_value, lengths_bdim, offsets_value, offsets_bdim, axis, initial);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// Generated vmap plumbing for aten::pad_sequence.
// TensorList arguments are not unwrapped here; the list is passed straight
// to `batch_rule`, which handles per-element unwrapping itself.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor pad_sequence_generated_plumbing(at::TensorList sequences, bool batch_first, double padding_value, c10::string_view padding_side) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(sequences, cur_level)) {
    return at::_ops::pad_sequence::call(sequences, batch_first, padding_value, padding_side);
  }

  auto results = batch_rule(sequences, batch_first, padding_value, padding_side);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// Generated vmap plumbing for aten::flatten_dense_tensors.
// TensorList arguments are not unwrapped here; the list is passed straight
// to `batch_rule`, which handles per-element unwrapping itself.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor flatten_dense_tensors_generated_plumbing(at::TensorList tensors) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(tensors, cur_level)) {
    return at::_ops::flatten_dense_tensors::call(tensors);
  }

  auto results = batch_rule(tensors);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// Generated vmap plumbing for aten::unflatten_dense_tensors.
// Returns a vector of tensors, so the batch_rule result is re-wrapped with
// makeBatchedVector rather than makeBatched. The `tensors` list is passed
// through to `batch_rule` unchanged.
template <typename batch_rule_t, batch_rule_t batch_rule>
::std::vector<at::Tensor> unflatten_dense_tensors_generated_plumbing(const at::Tensor & flat, at::TensorList tensors) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(flat, cur_level) && !isBatchedAtLevel(tensors, cur_level)) {
    return at::_ops::unflatten_dense_tensors::call(flat, tensors);
  }
  auto [flat_value, flat_bdim] = unwrapTensorAtLevel(flat, cur_level);
  auto results = batch_rule(flat_value, flat_bdim, tensors);
  return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
}
// Generated vmap plumbing for aten::_nested_tensor_from_tensor_list.
// TensorList arguments are not unwrapped here; the list is passed straight
// to `batch_rule`, which handles per-element unwrapping itself.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor _nested_tensor_from_tensor_list_generated_plumbing(at::TensorList list, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(list, cur_level)) {
    return at::_ops::_nested_tensor_from_tensor_list::call(list, dtype, layout, device, pin_memory);
  }

  auto results = batch_rule(list, dtype, layout, device, pin_memory);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// Generated vmap plumbing for aten::_fw_primal_copy.
// Unbatched fast path when `self` is not batched at the current level;
// otherwise unwrap, run `batch_rule`, and re-wrap. Note `level` here is the
// forward-AD level argument of the op, distinct from the vmap `cur_level`.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor _fw_primal_copy_generated_plumbing(const at::Tensor & self, int64_t level) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::_fw_primal_copy::call(self, level);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, level);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// Generated vmap plumbing for aten::_make_dual_copy.
// Unbatched fast path when neither `primal` nor `tangent` is batched at the
// current level; otherwise unwrap both, run `batch_rule`, and re-wrap.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor _make_dual_copy_generated_plumbing(const at::Tensor & primal, const at::Tensor & tangent, int64_t level) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(primal, cur_level) && !isBatchedAtLevel(tangent, cur_level)) {
    return at::_ops::_make_dual_copy::call(primal, tangent, level);
  }
  auto [primal_value, primal_bdim] = unwrapTensorAtLevel(primal, cur_level);
  auto [tangent_value, tangent_bdim] = unwrapTensorAtLevel(tangent, cur_level);
  auto results = batch_rule(primal_value, primal_bdim, tangent_value, tangent_bdim, level);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// Generated vmap plumbing for aten::view_as_real_copy.
// Unbatched fast path when `self` is not batched at the current level;
// otherwise unwrap, run `batch_rule`, and re-wrap.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor view_as_real_copy_generated_plumbing(const at::Tensor & self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::view_as_real_copy::call(self);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// Generated vmap plumbing for aten::view_as_complex_copy.
// Unbatched fast path when `self` is not batched at the current level;
// otherwise unwrap, run `batch_rule`, and re-wrap.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor view_as_complex_copy_generated_plumbing(const at::Tensor & self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::view_as_complex_copy::call(self);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// Generated vmap plumbing for aten::_conj_copy.
// Unbatched fast path when `self` is not batched at the current level;
// otherwise unwrap, run `batch_rule`, and re-wrap.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor _conj_copy_generated_plumbing(const at::Tensor & self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::_conj_copy::call(self);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// Generated vmap plumbing for aten::_neg_view_copy.
// Unbatched fast path when `self` is not batched at the current level;
// otherwise unwrap, run `batch_rule`, and re-wrap.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor _neg_view_copy_generated_plumbing(const at::Tensor & self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::_neg_view_copy::call(self);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// Generated vmap plumbing for aten::as_strided_copy.
// Symbolic-int size/stride/offset arguments pass through untouched; only
// `self` participates in batching.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor as_strided_copy_generated_plumbing(const at::Tensor & self, c10::SymIntArrayRef size, c10::SymIntArrayRef stride, ::std::optional<c10::SymInt> storage_offset) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::as_strided_copy::call(self, size, stride, storage_offset);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, size, stride, storage_offset);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// Generated vmap plumbing for aten::_sparse_broadcast_to_copy.
// Unbatched fast path when `self` is not batched at the current level;
// otherwise unwrap, run `batch_rule`, and re-wrap.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor _sparse_broadcast_to_copy_generated_plumbing(const at::Tensor & self, at::IntArrayRef size) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::_sparse_broadcast_to_copy::call(self, size);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, size);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
24442 template <typename batch_rule_t, batch_rule_t batch_rule>
24443 at::Tensor diagonal_copy_generated_plumbing(const at::Tensor & self, int64_t offset, int64_t dim1, int64_t dim2) {
24444   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
24445   auto maybe_layer = maybeCurrentDynamicLayer();
24446   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
24447   int64_t cur_level = maybe_layer->layerId();
24448   if (!isBatchedAtLevel(self, cur_level)) {
24449     return at::_ops::diagonal_copy::call(self, offset, dim1, dim2);
24450   }
24451   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
24452   auto results = batch_rule(self_value, self_bdim, offset, dim1, dim2);
24453   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
24454 }
24455 template <typename batch_rule_t, batch_rule_t batch_rule>
24456 at::Tensor expand_copy_generated_plumbing(const at::Tensor & self, c10::SymIntArrayRef size, bool implicit) {
24457   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
24458   auto maybe_layer = maybeCurrentDynamicLayer();
24459   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
24460   int64_t cur_level = maybe_layer->layerId();
24461   if (!isBatchedAtLevel(self, cur_level)) {
24462     return at::_ops::expand_copy::call(self, size, implicit);
24463   }
24464   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
24465   auto results = batch_rule(self_value, self_bdim, size, implicit);
24466   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
24467 }
24468 template <typename batch_rule_t, batch_rule_t batch_rule>
24469 at::Tensor permute_copy_generated_plumbing(const at::Tensor & self, at::IntArrayRef dims) {
24470   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
24471   auto maybe_layer = maybeCurrentDynamicLayer();
24472   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
24473   int64_t cur_level = maybe_layer->layerId();
24474   if (!isBatchedAtLevel(self, cur_level)) {
24475     return at::_ops::permute_copy::call(self, dims);
24476   }
24477   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
24478   auto results = batch_rule(self_value, self_bdim, dims);
24479   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
24480 }
24481 template <typename batch_rule_t, batch_rule_t batch_rule>
24482 at::Tensor _reshape_alias_copy_generated_plumbing(const at::Tensor & self, c10::SymIntArrayRef size, c10::SymIntArrayRef stride) {
24483   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
24484   auto maybe_layer = maybeCurrentDynamicLayer();
24485   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
24486   int64_t cur_level = maybe_layer->layerId();
24487   if (!isBatchedAtLevel(self, cur_level)) {
24488     return at::_ops::_reshape_alias_copy::call(self, size, stride);
24489   }
24490   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
24491   auto results = batch_rule(self_value, self_bdim, size, stride);
24492   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
24493 }
24494 template <typename batch_rule_t, batch_rule_t batch_rule>
24495 at::Tensor select_copy_int_generated_plumbing(const at::Tensor & self, int64_t dim, c10::SymInt index) {
24496   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
24497   auto maybe_layer = maybeCurrentDynamicLayer();
24498   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
24499   int64_t cur_level = maybe_layer->layerId();
24500   if (!isBatchedAtLevel(self, cur_level)) {
24501     return at::_ops::select_copy_int::call(self, dim, index);
24502   }
24503   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
24504   auto results = batch_rule(self_value, self_bdim, dim, index);
24505   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
24506 }
24507 template <typename batch_rule_t, batch_rule_t batch_rule>
24508 at::Tensor detach_copy_generated_plumbing(const at::Tensor & self) {
24509   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
24510   auto maybe_layer = maybeCurrentDynamicLayer();
24511   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
24512   int64_t cur_level = maybe_layer->layerId();
24513   if (!isBatchedAtLevel(self, cur_level)) {
24514     return at::_ops::detach_copy::call(self);
24515   }
24516   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
24517   auto results = batch_rule(self_value, self_bdim);
24518   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
24519 }
24520 template <typename batch_rule_t, batch_rule_t batch_rule>
24521 at::Tensor slice_copy_Tensor_generated_plumbing(const at::Tensor & self, int64_t dim, ::std::optional<c10::SymInt> start, ::std::optional<c10::SymInt> end, c10::SymInt step) {
24522   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
24523   auto maybe_layer = maybeCurrentDynamicLayer();
24524   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
24525   int64_t cur_level = maybe_layer->layerId();
24526   if (!isBatchedAtLevel(self, cur_level)) {
24527     return at::_ops::slice_copy_Tensor::call(self, dim, start, end, step);
24528   }
24529   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
24530   auto results = batch_rule(self_value, self_bdim, dim, start, end, step);
24531   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
24532 }
24533 template <typename batch_rule_t, batch_rule_t batch_rule>
24534 ::std::vector<at::Tensor> split_copy_Tensor_generated_plumbing(const at::Tensor & self, c10::SymInt split_size, int64_t dim) {
24535   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
24536   auto maybe_layer = maybeCurrentDynamicLayer();
24537   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
24538   int64_t cur_level = maybe_layer->layerId();
24539   if (!isBatchedAtLevel(self, cur_level)) {
24540     return at::_ops::split_copy_Tensor::call(self, split_size, dim);
24541   }
24542   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
24543   auto results = batch_rule(self_value, self_bdim, split_size, dim);
24544   return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
24545 }
24546 template <typename batch_rule_t, batch_rule_t batch_rule>
24547 ::std::vector<at::Tensor> split_with_sizes_copy_generated_plumbing(const at::Tensor & self, c10::SymIntArrayRef split_sizes, int64_t dim) {
24548   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
24549   auto maybe_layer = maybeCurrentDynamicLayer();
24550   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
24551   int64_t cur_level = maybe_layer->layerId();
24552   if (!isBatchedAtLevel(self, cur_level)) {
24553     return at::_ops::split_with_sizes_copy::call(self, split_sizes, dim);
24554   }
24555   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
24556   auto results = batch_rule(self_value, self_bdim, split_sizes, dim);
24557   return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
24558 }
24559 template <typename batch_rule_t, batch_rule_t batch_rule>
24560 at::Tensor squeeze_copy_generated_plumbing(const at::Tensor & self) {
24561   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
24562   auto maybe_layer = maybeCurrentDynamicLayer();
24563   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
24564   int64_t cur_level = maybe_layer->layerId();
24565   if (!isBatchedAtLevel(self, cur_level)) {
24566     return at::_ops::squeeze_copy::call(self);
24567   }
24568   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
24569   auto results = batch_rule(self_value, self_bdim);
24570   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
24571 }
24572 template <typename batch_rule_t, batch_rule_t batch_rule>
24573 at::Tensor squeeze_copy_dim_generated_plumbing(const at::Tensor & self, int64_t dim) {
24574   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
24575   auto maybe_layer = maybeCurrentDynamicLayer();
24576   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
24577   int64_t cur_level = maybe_layer->layerId();
24578   if (!isBatchedAtLevel(self, cur_level)) {
24579     return at::_ops::squeeze_copy_dim::call(self, dim);
24580   }
24581   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
24582   auto results = batch_rule(self_value, self_bdim, dim);
24583   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
24584 }
24585 template <typename batch_rule_t, batch_rule_t batch_rule>
24586 at::Tensor squeeze_copy_dims_generated_plumbing(const at::Tensor & self, at::IntArrayRef dim) {
24587   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
24588   auto maybe_layer = maybeCurrentDynamicLayer();
24589   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
24590   int64_t cur_level = maybe_layer->layerId();
24591   if (!isBatchedAtLevel(self, cur_level)) {
24592     return at::_ops::squeeze_copy_dims::call(self, dim);
24593   }
24594   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
24595   auto results = batch_rule(self_value, self_bdim, dim);
24596   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
24597 }
24598 template <typename batch_rule_t, batch_rule_t batch_rule>
24599 at::Tensor t_copy_generated_plumbing(const at::Tensor & self) {
24600   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
24601   auto maybe_layer = maybeCurrentDynamicLayer();
24602   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
24603   int64_t cur_level = maybe_layer->layerId();
24604   if (!isBatchedAtLevel(self, cur_level)) {
24605     return at::_ops::t_copy::call(self);
24606   }
24607   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
24608   auto results = batch_rule(self_value, self_bdim);
24609   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
24610 }
24611 template <typename batch_rule_t, batch_rule_t batch_rule>
24612 at::Tensor transpose_copy_int_generated_plumbing(const at::Tensor & self, int64_t dim0, int64_t dim1) {
24613   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
24614   auto maybe_layer = maybeCurrentDynamicLayer();
24615   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
24616   int64_t cur_level = maybe_layer->layerId();
24617   if (!isBatchedAtLevel(self, cur_level)) {
24618     return at::_ops::transpose_copy_int::call(self, dim0, dim1);
24619   }
24620   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
24621   auto results = batch_rule(self_value, self_bdim, dim0, dim1);
24622   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
24623 }
24624 template <typename batch_rule_t, batch_rule_t batch_rule>
24625 at::Tensor unsqueeze_copy_generated_plumbing(const at::Tensor & self, int64_t dim) {
24626   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
24627   auto maybe_layer = maybeCurrentDynamicLayer();
24628   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
24629   int64_t cur_level = maybe_layer->layerId();
24630   if (!isBatchedAtLevel(self, cur_level)) {
24631     return at::_ops::unsqueeze_copy::call(self, dim);
24632   }
24633   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
24634   auto results = batch_rule(self_value, self_bdim, dim);
24635   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
24636 }
24637 template <typename batch_rule_t, batch_rule_t batch_rule>
24638 at::Tensor _indices_copy_generated_plumbing(const at::Tensor & self) {
24639   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
24640   auto maybe_layer = maybeCurrentDynamicLayer();
24641   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
24642   int64_t cur_level = maybe_layer->layerId();
24643   if (!isBatchedAtLevel(self, cur_level)) {
24644     return at::_ops::_indices_copy::call(self);
24645   }
24646   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
24647   auto results = batch_rule(self_value, self_bdim);
24648   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
24649 }
24650 template <typename batch_rule_t, batch_rule_t batch_rule>
24651 at::Tensor _values_copy_generated_plumbing(const at::Tensor & self) {
24652   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
24653   auto maybe_layer = maybeCurrentDynamicLayer();
24654   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
24655   int64_t cur_level = maybe_layer->layerId();
24656   if (!isBatchedAtLevel(self, cur_level)) {
24657     return at::_ops::_values_copy::call(self);
24658   }
24659   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
24660   auto results = batch_rule(self_value, self_bdim);
24661   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
24662 }
24663 template <typename batch_rule_t, batch_rule_t batch_rule>
24664 at::Tensor indices_copy_generated_plumbing(const at::Tensor & self) {
24665   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
24666   auto maybe_layer = maybeCurrentDynamicLayer();
24667   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
24668   int64_t cur_level = maybe_layer->layerId();
24669   if (!isBatchedAtLevel(self, cur_level)) {
24670     return at::_ops::indices_copy::call(self);
24671   }
24672   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
24673   auto results = batch_rule(self_value, self_bdim);
24674   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
24675 }
24676 template <typename batch_rule_t, batch_rule_t batch_rule>
24677 at::Tensor values_copy_generated_plumbing(const at::Tensor & self) {
24678   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
24679   auto maybe_layer = maybeCurrentDynamicLayer();
24680   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
24681   int64_t cur_level = maybe_layer->layerId();
24682   if (!isBatchedAtLevel(self, cur_level)) {
24683     return at::_ops::values_copy::call(self);
24684   }
24685   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
24686   auto results = batch_rule(self_value, self_bdim);
24687   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
24688 }
24689 template <typename batch_rule_t, batch_rule_t batch_rule>
24690 at::Tensor crow_indices_copy_generated_plumbing(const at::Tensor & self) {
24691   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
24692   auto maybe_layer = maybeCurrentDynamicLayer();
24693   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
24694   int64_t cur_level = maybe_layer->layerId();
24695   if (!isBatchedAtLevel(self, cur_level)) {
24696     return at::_ops::crow_indices_copy::call(self);
24697   }
24698   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
24699   auto results = batch_rule(self_value, self_bdim);
24700   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
24701 }
24702 template <typename batch_rule_t, batch_rule_t batch_rule>
24703 at::Tensor col_indices_copy_generated_plumbing(const at::Tensor & self) {
24704   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
24705   auto maybe_layer = maybeCurrentDynamicLayer();
24706   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
24707   int64_t cur_level = maybe_layer->layerId();
24708   if (!isBatchedAtLevel(self, cur_level)) {
24709     return at::_ops::col_indices_copy::call(self);
24710   }
24711   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
24712   auto results = batch_rule(self_value, self_bdim);
24713   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
24714 }
24715 template <typename batch_rule_t, batch_rule_t batch_rule>
24716 at::Tensor ccol_indices_copy_generated_plumbing(const at::Tensor & self) {
24717   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
24718   auto maybe_layer = maybeCurrentDynamicLayer();
24719   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
24720   int64_t cur_level = maybe_layer->layerId();
24721   if (!isBatchedAtLevel(self, cur_level)) {
24722     return at::_ops::ccol_indices_copy::call(self);
24723   }
24724   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
24725   auto results = batch_rule(self_value, self_bdim);
24726   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
24727 }
24728 template <typename batch_rule_t, batch_rule_t batch_rule>
24729 at::Tensor row_indices_copy_generated_plumbing(const at::Tensor & self) {
24730   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
24731   auto maybe_layer = maybeCurrentDynamicLayer();
24732   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
24733   int64_t cur_level = maybe_layer->layerId();
24734   if (!isBatchedAtLevel(self, cur_level)) {
24735     return at::_ops::row_indices_copy::call(self);
24736   }
24737   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
24738   auto results = batch_rule(self_value, self_bdim);
24739   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
24740 }
24741 template <typename batch_rule_t, batch_rule_t batch_rule>
24742 ::std::vector<at::Tensor> unbind_copy_int_generated_plumbing(const at::Tensor & self, int64_t dim) {
24743   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
24744   auto maybe_layer = maybeCurrentDynamicLayer();
24745   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
24746   int64_t cur_level = maybe_layer->layerId();
24747   if (!isBatchedAtLevel(self, cur_level)) {
24748     return at::_ops::unbind_copy_int::call(self, dim);
24749   }
24750   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
24751   auto results = batch_rule(self_value, self_bdim, dim);
24752   return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
24753 }
24754 template <typename batch_rule_t, batch_rule_t batch_rule>
24755 void unbind_copy_int_out_generated_plumbing(const at::Tensor & self, int64_t dim, at::TensorList out) {
24756   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
24757   auto maybe_layer = maybeCurrentDynamicLayer();
24758   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
24759   int64_t cur_level = maybe_layer->layerId();
24760   if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(out, cur_level)) {
24761     return at::_ops::unbind_copy_int_out::call(self, dim, out);
24762   }
24763   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
24764   batch_rule(self_value, self_bdim, dim, out);
24765 }
24766 template <typename batch_rule_t, batch_rule_t batch_rule>
24767 void split_copy_Tensor_out_generated_plumbing(const at::Tensor & self, c10::SymInt split_size, int64_t dim, at::TensorList out) {
24768   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
24769   auto maybe_layer = maybeCurrentDynamicLayer();
24770   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
24771   int64_t cur_level = maybe_layer->layerId();
24772   if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(out, cur_level)) {
24773     return at::_ops::split_copy_Tensor_out::call(self, split_size, dim, out);
24774   }
24775   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
24776   batch_rule(self_value, self_bdim, split_size, dim, out);
24777 }
24778 template <typename batch_rule_t, batch_rule_t batch_rule>
24779 void split_with_sizes_copy_out_generated_plumbing(const at::Tensor & self, c10::SymIntArrayRef split_sizes, int64_t dim, at::TensorList out) {
24780   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
24781   auto maybe_layer = maybeCurrentDynamicLayer();
24782   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
24783   int64_t cur_level = maybe_layer->layerId();
24784   if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(out, cur_level)) {
24785     return at::_ops::split_with_sizes_copy_out::call(self, split_sizes, dim, out);
24786   }
24787   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
24788   batch_rule(self_value, self_bdim, split_sizes, dim, out);
24789 }
24790 template <typename batch_rule_t, batch_rule_t batch_rule>
24791 at::Tensor view_copy_generated_plumbing(const at::Tensor & self, c10::SymIntArrayRef size) {
24792   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
24793   auto maybe_layer = maybeCurrentDynamicLayer();
24794   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
24795   int64_t cur_level = maybe_layer->layerId();
24796   if (!isBatchedAtLevel(self, cur_level)) {
24797     return at::_ops::view_copy::call(self, size);
24798   }
24799   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
24800   auto results = batch_rule(self_value, self_bdim, size);
24801   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
24802 }
24803 template <typename batch_rule_t, batch_rule_t batch_rule>
24804 at::Tensor view_copy_dtype_generated_plumbing(const at::Tensor & self, at::ScalarType dtype) {
24805   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
24806   auto maybe_layer = maybeCurrentDynamicLayer();
24807   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
24808   int64_t cur_level = maybe_layer->layerId();
24809   if (!isBatchedAtLevel(self, cur_level)) {
24810     return at::_ops::view_copy_dtype::call(self, dtype);
24811   }
24812   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
24813   auto results = batch_rule(self_value, self_bdim, dtype);
24814   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
24815 }
24816 template <typename batch_rule_t, batch_rule_t batch_rule>
24817 at::Tensor unfold_copy_generated_plumbing(const at::Tensor & self, int64_t dimension, int64_t size, int64_t step) {
24818   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
24819   auto maybe_layer = maybeCurrentDynamicLayer();
24820   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
24821   int64_t cur_level = maybe_layer->layerId();
24822   if (!isBatchedAtLevel(self, cur_level)) {
24823     return at::_ops::unfold_copy::call(self, dimension, size, step);
24824   }
24825   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
24826   auto results = batch_rule(self_value, self_bdim, dimension, size, step);
24827   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
24828 }
24829 template <typename batch_rule_t, batch_rule_t batch_rule>
24830 at::Tensor alias_copy_generated_plumbing(const at::Tensor & self) {
24831   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
24832   auto maybe_layer = maybeCurrentDynamicLayer();
24833   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
24834   int64_t cur_level = maybe_layer->layerId();
24835   if (!isBatchedAtLevel(self, cur_level)) {
24836     return at::_ops::alias_copy::call(self);
24837   }
24838   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
24839   auto results = batch_rule(self_value, self_bdim);
24840   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
24841 }
24842 template <typename batch_rule_t, batch_rule_t batch_rule>
24843 at::Tensor to_padded_tensor_generated_plumbing(const at::Tensor & self, double padding, at::OptionalSymIntArrayRef output_size) {
24844   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
24845   auto maybe_layer = maybeCurrentDynamicLayer();
24846   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
24847   int64_t cur_level = maybe_layer->layerId();
24848   if (!isBatchedAtLevel(self, cur_level)) {
24849     return at::_ops::to_padded_tensor::call(self, padding, output_size);
24850   }
24851   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
24852   auto results = batch_rule(self_value, self_bdim, padding, output_size);
24853   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
24854 }
24855 template <typename batch_rule_t, batch_rule_t batch_rule>
24856 at::Tensor _jagged_to_padded_dense_forward_generated_plumbing(const at::Tensor & values, at::TensorList offsets, c10::SymIntArrayRef max_lengths, double padding_value) {
24857   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
24858   auto maybe_layer = maybeCurrentDynamicLayer();
24859   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
24860   int64_t cur_level = maybe_layer->layerId();
24861   if (!isBatchedAtLevel(values, cur_level) && !isBatchedAtLevel(offsets, cur_level)) {
24862     return at::_ops::_jagged_to_padded_dense_forward::call(values, offsets, max_lengths, padding_value);
24863   }
24864   auto [values_value, values_bdim] = unwrapTensorAtLevel(values, cur_level);
24865   auto results = batch_rule(values_value, values_bdim, offsets, max_lengths, padding_value);
24866   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
24867 }
24868 template <typename batch_rule_t, batch_rule_t batch_rule>
24869 at::Tensor _padded_dense_to_jagged_forward_generated_plumbing(const at::Tensor & dense, at::TensorList offsets, ::std::optional<c10::SymInt> total_L) {
24870   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
24871   auto maybe_layer = maybeCurrentDynamicLayer();
24872   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
24873   int64_t cur_level = maybe_layer->layerId();
24874   if (!isBatchedAtLevel(dense, cur_level) && !isBatchedAtLevel(offsets, cur_level)) {
24875     return at::_ops::_padded_dense_to_jagged_forward::call(dense, offsets, total_L);
24876   }
24877   auto [dense_value, dense_bdim] = unwrapTensorAtLevel(dense, cur_level);
24878   auto results = batch_rule(dense_value, dense_bdim, offsets, total_L);
24879   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
24880 }
24881 template <typename batch_rule_t, batch_rule_t batch_rule>
24882 at::Tensor _nested_from_padded_tensor_generated_plumbing(const at::Tensor & padded, const at::Tensor & offsets, const at::Tensor & dummy, int64_t ragged_idx, const ::std::optional<at::Tensor> & min_seqlen, const ::std::optional<at::Tensor> & max_seqlen, ::std::optional<c10::SymInt> sum_S) {
24883   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
24884   auto maybe_layer = maybeCurrentDynamicLayer();
24885   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
24886   int64_t cur_level = maybe_layer->layerId();
24887   if (!isBatchedAtLevel(padded, cur_level) && !isBatchedAtLevel(offsets, cur_level) && !isBatchedAtLevel(dummy, cur_level) && !isBatchedAtLevel(min_seqlen, cur_level) && !isBatchedAtLevel(max_seqlen, cur_level)) {
24888     return at::_ops::_nested_from_padded_tensor::call(padded, offsets, dummy, ragged_idx, min_seqlen, max_seqlen, sum_S);
24889   }
24890   auto [padded_value, padded_bdim] = unwrapTensorAtLevel(padded, cur_level);
24891   auto [offsets_value, offsets_bdim] = unwrapTensorAtLevel(offsets, cur_level);
24892   auto [dummy_value, dummy_bdim] = unwrapTensorAtLevel(dummy, cur_level);
24893   std::optional<Tensor> min_seqlen_value;
24894   std::optional<int64_t> min_seqlen_bdim;
24895   if (min_seqlen) {
24896       std::tie(min_seqlen_value, min_seqlen_bdim) = unwrapTensorAtLevel(min_seqlen.value(), cur_level);
24897   }
24898   std::optional<Tensor> max_seqlen_value;
24899   std::optional<int64_t> max_seqlen_bdim;
24900   if (max_seqlen) {
24901       std::tie(max_seqlen_value, max_seqlen_bdim) = unwrapTensorAtLevel(max_seqlen.value(), cur_level);
24902   }
24903   auto results = batch_rule(padded_value, padded_bdim, offsets_value, offsets_bdim, dummy_value, dummy_bdim, ragged_idx, min_seqlen_value, min_seqlen_bdim, max_seqlen_value, max_seqlen_bdim, sum_S);
24904   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
24905 }
24906 template <typename batch_rule_t, batch_rule_t batch_rule>
24907 at::Tensor _nested_tensor_softmax_with_shape_generated_plumbing(const at::Tensor & self, const at::Tensor & query) {
24908   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
24909   auto maybe_layer = maybeCurrentDynamicLayer();
24910   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
24911   int64_t cur_level = maybe_layer->layerId();
24912   if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(query, cur_level)) {
24913     return at::_ops::_nested_tensor_softmax_with_shape::call(self, query);
24914   }
24915   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
24916   auto [query_value, query_bdim] = unwrapTensorAtLevel(query, cur_level);
24917   auto results = batch_rule(self_value, self_bdim, query_value, query_bdim);
24918   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
24919 }
24920 template <typename batch_rule_t, batch_rule_t batch_rule>
24921 at::Tensor _safe_softmax_generated_plumbing(const at::Tensor & self, int64_t dim, ::std::optional<at::ScalarType> dtype) {
24922   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
24923   auto maybe_layer = maybeCurrentDynamicLayer();
24924   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
24925   int64_t cur_level = maybe_layer->layerId();
24926   if (!isBatchedAtLevel(self, cur_level)) {
24927     return at::_ops::_safe_softmax::call(self, dim, dtype);
24928   }
24929   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
24930   auto results = batch_rule(self_value, self_bdim, dim, dtype);
24931   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
24932 }
// Generated vmap plumbing for at::_ops::_transformer_encoder_layer_fwd.
// With the FuncTorchBatched key excluded, checks whether any tensor argument is
// batched at the current dynamic-layer level; if not, defers to the plain op.
// Otherwise unwraps each tensor into a (value, bdim) pair, forwards everything
// to the batch rule, and re-wraps the result as a BatchedTensor at that level.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor _transformer_encoder_layer_fwd_generated_plumbing(const at::Tensor & src, int64_t embed_dim, int64_t num_heads, const at::Tensor & qkv_weight, const at::Tensor & qkv_bias, const at::Tensor & proj_weight, const at::Tensor & proj_bias, bool use_gelu, bool norm_first, double eps, const at::Tensor & norm_weight_1, const at::Tensor & norm_bias_1, const at::Tensor & norm_weight_2, const at::Tensor & norm_bias_2, const at::Tensor & ffn_weight_1, const at::Tensor & ffn_bias_1, const at::Tensor & ffn_weight_2, const at::Tensor & ffn_bias_2, const ::std::optional<at::Tensor> & mask, ::std::optional<int64_t> mask_type) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  // Fast path: nothing batched at this level — call the regular kernel.
  if (!isBatchedAtLevel(src, cur_level) && !isBatchedAtLevel(qkv_weight, cur_level) && !isBatchedAtLevel(qkv_bias, cur_level) && !isBatchedAtLevel(proj_weight, cur_level) && !isBatchedAtLevel(proj_bias, cur_level) && !isBatchedAtLevel(norm_weight_1, cur_level) && !isBatchedAtLevel(norm_bias_1, cur_level) && !isBatchedAtLevel(norm_weight_2, cur_level) && !isBatchedAtLevel(norm_bias_2, cur_level) && !isBatchedAtLevel(ffn_weight_1, cur_level) && !isBatchedAtLevel(ffn_bias_1, cur_level) && !isBatchedAtLevel(ffn_weight_2, cur_level) && !isBatchedAtLevel(ffn_bias_2, cur_level) && !isBatchedAtLevel(mask, cur_level)) {
    return at::_ops::_transformer_encoder_layer_fwd::call(src, embed_dim, num_heads, qkv_weight, qkv_bias, proj_weight, proj_bias, use_gelu, norm_first, eps, norm_weight_1, norm_bias_1, norm_weight_2, norm_bias_2, ffn_weight_1, ffn_bias_1, ffn_weight_2, ffn_bias_2, mask, mask_type);
  }
  auto [src_value, src_bdim] = unwrapTensorAtLevel(src, cur_level);
  auto [qkv_weight_value, qkv_weight_bdim] = unwrapTensorAtLevel(qkv_weight, cur_level);
  auto [qkv_bias_value, qkv_bias_bdim] = unwrapTensorAtLevel(qkv_bias, cur_level);
  auto [proj_weight_value, proj_weight_bdim] = unwrapTensorAtLevel(proj_weight, cur_level);
  auto [proj_bias_value, proj_bias_bdim] = unwrapTensorAtLevel(proj_bias, cur_level);
  auto [norm_weight_1_value, norm_weight_1_bdim] = unwrapTensorAtLevel(norm_weight_1, cur_level);
  auto [norm_bias_1_value, norm_bias_1_bdim] = unwrapTensorAtLevel(norm_bias_1, cur_level);
  auto [norm_weight_2_value, norm_weight_2_bdim] = unwrapTensorAtLevel(norm_weight_2, cur_level);
  auto [norm_bias_2_value, norm_bias_2_bdim] = unwrapTensorAtLevel(norm_bias_2, cur_level);
  auto [ffn_weight_1_value, ffn_weight_1_bdim] = unwrapTensorAtLevel(ffn_weight_1, cur_level);
  auto [ffn_bias_1_value, ffn_bias_1_bdim] = unwrapTensorAtLevel(ffn_bias_1, cur_level);
  auto [ffn_weight_2_value, ffn_weight_2_bdim] = unwrapTensorAtLevel(ffn_weight_2, cur_level);
  auto [ffn_bias_2_value, ffn_bias_2_bdim] = unwrapTensorAtLevel(ffn_bias_2, cur_level);
  // Optional mask: unwrap only when present; otherwise nullopt pair is forwarded.
  std::optional<Tensor> mask_value;
  std::optional<int64_t> mask_bdim;
  if (mask) {
      std::tie(mask_value, mask_bdim) = unwrapTensorAtLevel(mask.value(), cur_level);
  }
  auto results = batch_rule(src_value, src_bdim, embed_dim, num_heads, qkv_weight_value, qkv_weight_bdim, qkv_bias_value, qkv_bias_bdim, proj_weight_value, proj_weight_bdim, proj_bias_value, proj_bias_bdim, use_gelu, norm_first, eps, norm_weight_1_value, norm_weight_1_bdim, norm_bias_1_value, norm_bias_1_bdim, norm_weight_2_value, norm_weight_2_bdim, norm_bias_2_value, norm_bias_2_bdim, ffn_weight_1_value, ffn_weight_1_bdim, ffn_bias_1_value, ffn_bias_1_bdim, ffn_weight_2_value, ffn_weight_2_bdim, ffn_bias_2_value, ffn_bias_2_bdim, mask_value, mask_bdim, mask_type);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// Generated vmap plumbing for at::_ops::_native_multi_head_attention.
// Falls through to the plain op when nothing is batched at the current level;
// otherwise unwraps (value, bdim) pairs, invokes the batch rule, and re-wraps
// both tensor outputs at that level.
template <typename batch_rule_t, batch_rule_t batch_rule>
::std::tuple<at::Tensor,at::Tensor> _native_multi_head_attention_generated_plumbing(const at::Tensor & query, const at::Tensor & key, const at::Tensor & value, int64_t embed_dim, int64_t num_head, const at::Tensor & qkv_weight, const at::Tensor & qkv_bias, const at::Tensor & proj_weight, const at::Tensor & proj_bias, const ::std::optional<at::Tensor> & mask, bool need_weights, bool average_attn_weights, ::std::optional<int64_t> mask_type) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(query, cur_level) && !isBatchedAtLevel(key, cur_level) && !isBatchedAtLevel(value, cur_level) && !isBatchedAtLevel(qkv_weight, cur_level) && !isBatchedAtLevel(qkv_bias, cur_level) && !isBatchedAtLevel(proj_weight, cur_level) && !isBatchedAtLevel(proj_bias, cur_level) && !isBatchedAtLevel(mask, cur_level)) {
    return at::_ops::_native_multi_head_attention::call(query, key, value, embed_dim, num_head, qkv_weight, qkv_bias, proj_weight, proj_bias, mask, need_weights, average_attn_weights, mask_type);
  }
  auto [query_value, query_bdim] = unwrapTensorAtLevel(query, cur_level);
  auto [key_value, key_bdim] = unwrapTensorAtLevel(key, cur_level);
  auto [value_value, value_bdim] = unwrapTensorAtLevel(value, cur_level);
  auto [qkv_weight_value, qkv_weight_bdim] = unwrapTensorAtLevel(qkv_weight, cur_level);
  auto [qkv_bias_value, qkv_bias_bdim] = unwrapTensorAtLevel(qkv_bias, cur_level);
  auto [proj_weight_value, proj_weight_bdim] = unwrapTensorAtLevel(proj_weight, cur_level);
  auto [proj_bias_value, proj_bias_bdim] = unwrapTensorAtLevel(proj_bias, cur_level);
  // Optional mask: unwrapped only when present.
  std::optional<Tensor> mask_value;
  std::optional<int64_t> mask_bdim;
  if (mask) {
      std::tie(mask_value, mask_bdim) = unwrapTensorAtLevel(mask.value(), cur_level);
  }
  auto results = batch_rule(query_value, query_bdim, key_value, key_bdim, value_value, value_bdim, embed_dim, num_head, qkv_weight_value, qkv_weight_bdim, qkv_bias_value, qkv_bias_bdim, proj_weight_value, proj_weight_bdim, proj_bias_value, proj_bias_bdim, mask_value, mask_bdim, need_weights, average_attn_weights, mask_type);
  return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
}
24987 template <typename batch_rule_t, batch_rule_t batch_rule>
24988 at::Tensor scaled_dot_product_attention_generated_plumbing(const at::Tensor & query, const at::Tensor & key, const at::Tensor & value, const ::std::optional<at::Tensor> & attn_mask, double dropout_p, bool is_causal, ::std::optional<double> scale, bool enable_gqa) {
24989   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
24990   auto maybe_layer = maybeCurrentDynamicLayer();
24991   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
24992   int64_t cur_level = maybe_layer->layerId();
24993   if (!isBatchedAtLevel(query, cur_level) && !isBatchedAtLevel(key, cur_level) && !isBatchedAtLevel(value, cur_level) && !isBatchedAtLevel(attn_mask, cur_level)) {
24994     return at::_ops::scaled_dot_product_attention::call(query, key, value, attn_mask, dropout_p, is_causal, scale, enable_gqa);
24995   }
24996   auto [query_value, query_bdim] = unwrapTensorAtLevel(query, cur_level);
24997   auto [key_value, key_bdim] = unwrapTensorAtLevel(key, cur_level);
24998   auto [value_value, value_bdim] = unwrapTensorAtLevel(value, cur_level);
24999   std::optional<Tensor> attn_mask_value;
25000   std::optional<int64_t> attn_mask_bdim;
25001   if (attn_mask) {
25002       std::tie(attn_mask_value, attn_mask_bdim) = unwrapTensorAtLevel(attn_mask.value(), cur_level);
25003   }
25004   auto results = batch_rule(query_value, query_bdim, key_value, key_bdim, value_value, value_bdim, attn_mask_value, attn_mask_bdim, dropout_p, is_causal, scale, enable_gqa);
25005   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
25006 }
// Generated vmap plumbing for at::_ops::_scaled_dot_product_attention_math.
// Falls through to the plain op when no argument is batched at the current
// level; otherwise unwraps (value, bdim) pairs, calls the batch rule, and
// re-wraps both tensor outputs.
template <typename batch_rule_t, batch_rule_t batch_rule>
::std::tuple<at::Tensor,at::Tensor> _scaled_dot_product_attention_math_generated_plumbing(const at::Tensor & query, const at::Tensor & key, const at::Tensor & value, const ::std::optional<at::Tensor> & attn_mask, double dropout_p, bool is_causal, const ::std::optional<at::Tensor> & dropout_mask, ::std::optional<double> scale, bool enable_gqa) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(query, cur_level) && !isBatchedAtLevel(key, cur_level) && !isBatchedAtLevel(value, cur_level) && !isBatchedAtLevel(attn_mask, cur_level) && !isBatchedAtLevel(dropout_mask, cur_level)) {
    return at::_ops::_scaled_dot_product_attention_math::call(query, key, value, attn_mask, dropout_p, is_causal, dropout_mask, scale, enable_gqa);
  }
  auto [query_value, query_bdim] = unwrapTensorAtLevel(query, cur_level);
  auto [key_value, key_bdim] = unwrapTensorAtLevel(key, cur_level);
  auto [value_value, value_bdim] = unwrapTensorAtLevel(value, cur_level);
  // Optional tensors: unwrapped only when present.
  std::optional<Tensor> attn_mask_value;
  std::optional<int64_t> attn_mask_bdim;
  if (attn_mask) {
      std::tie(attn_mask_value, attn_mask_bdim) = unwrapTensorAtLevel(attn_mask.value(), cur_level);
  }
  std::optional<Tensor> dropout_mask_value;
  std::optional<int64_t> dropout_mask_bdim;
  if (dropout_mask) {
      std::tie(dropout_mask_value, dropout_mask_bdim) = unwrapTensorAtLevel(dropout_mask.value(), cur_level);
  }
  auto results = batch_rule(query_value, query_bdim, key_value, key_bdim, value_value, value_bdim, attn_mask_value, attn_mask_bdim, dropout_p, is_causal, dropout_mask_value, dropout_mask_bdim, scale, enable_gqa);
  return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
}
// Generated vmap plumbing for at::_ops::_scaled_dot_product_attention_math_for_mps.
// Same shape as the generic math variant, minus the enable_gqa flag: defer to
// the plain op when nothing is batched, else unwrap, run the batch rule, and
// re-wrap both tensor outputs at the current level.
template <typename batch_rule_t, batch_rule_t batch_rule>
::std::tuple<at::Tensor,at::Tensor> _scaled_dot_product_attention_math_for_mps_generated_plumbing(const at::Tensor & query, const at::Tensor & key, const at::Tensor & value, const ::std::optional<at::Tensor> & attn_mask, double dropout_p, bool is_causal, const ::std::optional<at::Tensor> & dropout_mask, ::std::optional<double> scale) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(query, cur_level) && !isBatchedAtLevel(key, cur_level) && !isBatchedAtLevel(value, cur_level) && !isBatchedAtLevel(attn_mask, cur_level) && !isBatchedAtLevel(dropout_mask, cur_level)) {
    return at::_ops::_scaled_dot_product_attention_math_for_mps::call(query, key, value, attn_mask, dropout_p, is_causal, dropout_mask, scale);
  }
  auto [query_value, query_bdim] = unwrapTensorAtLevel(query, cur_level);
  auto [key_value, key_bdim] = unwrapTensorAtLevel(key, cur_level);
  auto [value_value, value_bdim] = unwrapTensorAtLevel(value, cur_level);
  // Optional tensors: unwrapped only when present.
  std::optional<Tensor> attn_mask_value;
  std::optional<int64_t> attn_mask_bdim;
  if (attn_mask) {
      std::tie(attn_mask_value, attn_mask_bdim) = unwrapTensorAtLevel(attn_mask.value(), cur_level);
  }
  std::optional<Tensor> dropout_mask_value;
  std::optional<int64_t> dropout_mask_bdim;
  if (dropout_mask) {
      std::tie(dropout_mask_value, dropout_mask_bdim) = unwrapTensorAtLevel(dropout_mask.value(), cur_level);
  }
  auto results = batch_rule(query_value, query_bdim, key_value, key_bdim, value_value, value_bdim, attn_mask_value, attn_mask_bdim, dropout_p, is_causal, dropout_mask_value, dropout_mask_bdim, scale);
  return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
}
25057 template <typename batch_rule_t, batch_rule_t batch_rule>
25058 ::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor,c10::SymInt,c10::SymInt,at::Tensor,at::Tensor,at::Tensor> _scaled_dot_product_flash_attention_generated_plumbing(const at::Tensor & query, const at::Tensor & key, const at::Tensor & value, double dropout_p, bool is_causal, bool return_debug_mask, ::std::optional<double> scale) {
25059   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
25060   auto maybe_layer = maybeCurrentDynamicLayer();
25061   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
25062   int64_t cur_level = maybe_layer->layerId();
25063   if (!isBatchedAtLevel(query, cur_level) && !isBatchedAtLevel(key, cur_level) && !isBatchedAtLevel(value, cur_level)) {
25064     return at::_ops::_scaled_dot_product_flash_attention::call(query, key, value, dropout_p, is_causal, return_debug_mask, scale);
25065   }
25066   auto [query_value, query_bdim] = unwrapTensorAtLevel(query, cur_level);
25067   auto [key_value, key_bdim] = unwrapTensorAtLevel(key, cur_level);
25068   auto [value_value, value_bdim] = unwrapTensorAtLevel(value, cur_level);
25069   auto results = batch_rule(query_value, query_bdim, key_value, key_bdim, value_value, value_bdim, dropout_p, is_causal, return_debug_mask, scale);
25070   return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level), makeBatched(std::get<6>(results), std::get<7>(results), cur_level), std::get<8>(results), std::get<9>(results), makeBatched(std::get<10>(results), std::get<11>(results), cur_level), makeBatched(std::get<12>(results), std::get<13>(results), cur_level), makeBatched(std::get<14>(results), std::get<15>(results), cur_level));
25071 }
// Generated vmap plumbing for at::_ops::_scaled_dot_product_flash_attention_for_cpu.
// Defers to the plain op when nothing is batched at the current level; else
// unwraps (value, bdim) pairs, runs the batch rule, and re-wraps both outputs.
template <typename batch_rule_t, batch_rule_t batch_rule>
::std::tuple<at::Tensor,at::Tensor> _scaled_dot_product_flash_attention_for_cpu_generated_plumbing(const at::Tensor & query, const at::Tensor & key, const at::Tensor & value, double dropout_p, bool is_causal, const ::std::optional<at::Tensor> & attn_mask, ::std::optional<double> scale) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(query, cur_level) && !isBatchedAtLevel(key, cur_level) && !isBatchedAtLevel(value, cur_level) && !isBatchedAtLevel(attn_mask, cur_level)) {
    return at::_ops::_scaled_dot_product_flash_attention_for_cpu::call(query, key, value, dropout_p, is_causal, attn_mask, scale);
  }
  auto [query_value, query_bdim] = unwrapTensorAtLevel(query, cur_level);
  auto [key_value, key_bdim] = unwrapTensorAtLevel(key, cur_level);
  auto [value_value, value_bdim] = unwrapTensorAtLevel(value, cur_level);
  // Optional mask: unwrapped only when present.
  std::optional<Tensor> attn_mask_value;
  std::optional<int64_t> attn_mask_bdim;
  if (attn_mask) {
      std::tie(attn_mask_value, attn_mask_bdim) = unwrapTensorAtLevel(attn_mask.value(), cur_level);
  }
  auto results = batch_rule(query_value, query_bdim, key_value, key_bdim, value_value, value_bdim, dropout_p, is_causal, attn_mask_value, attn_mask_bdim, scale);
  return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
}
// Generated vmap plumbing for at::_ops::_scaled_dot_product_flash_attention_backward.
// SymInt sizes (max_q/max_k) and scalar flags are forwarded unchanged; every
// tensor argument is unwrapped into a (value, bdim) pair before the batch rule
// runs, and the three gradient outputs are re-wrapped at the current level.
template <typename batch_rule_t, batch_rule_t batch_rule>
::std::tuple<at::Tensor,at::Tensor,at::Tensor> _scaled_dot_product_flash_attention_backward_generated_plumbing(const at::Tensor & grad_out, const at::Tensor & query, const at::Tensor & key, const at::Tensor & value, const at::Tensor & out, const at::Tensor & logsumexp, const at::Tensor & cum_seq_q, const at::Tensor & cum_seq_k, c10::SymInt max_q, c10::SymInt max_k, double dropout_p, bool is_causal, const at::Tensor & philox_seed, const at::Tensor & philox_offset, ::std::optional<double> scale) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  // Fast path: nothing batched at this level — call the regular kernel.
  if (!isBatchedAtLevel(grad_out, cur_level) && !isBatchedAtLevel(query, cur_level) && !isBatchedAtLevel(key, cur_level) && !isBatchedAtLevel(value, cur_level) && !isBatchedAtLevel(out, cur_level) && !isBatchedAtLevel(logsumexp, cur_level) && !isBatchedAtLevel(cum_seq_q, cur_level) && !isBatchedAtLevel(cum_seq_k, cur_level) && !isBatchedAtLevel(philox_seed, cur_level) && !isBatchedAtLevel(philox_offset, cur_level)) {
    return at::_ops::_scaled_dot_product_flash_attention_backward::call(grad_out, query, key, value, out, logsumexp, cum_seq_q, cum_seq_k, max_q, max_k, dropout_p, is_causal, philox_seed, philox_offset, scale);
  }
  auto [grad_out_value, grad_out_bdim] = unwrapTensorAtLevel(grad_out, cur_level);
  auto [query_value, query_bdim] = unwrapTensorAtLevel(query, cur_level);
  auto [key_value, key_bdim] = unwrapTensorAtLevel(key, cur_level);
  auto [value_value, value_bdim] = unwrapTensorAtLevel(value, cur_level);
  auto [out_value, out_bdim] = unwrapTensorAtLevel(out, cur_level);
  auto [logsumexp_value, logsumexp_bdim] = unwrapTensorAtLevel(logsumexp, cur_level);
  auto [cum_seq_q_value, cum_seq_q_bdim] = unwrapTensorAtLevel(cum_seq_q, cur_level);
  auto [cum_seq_k_value, cum_seq_k_bdim] = unwrapTensorAtLevel(cum_seq_k, cur_level);
  auto [philox_seed_value, philox_seed_bdim] = unwrapTensorAtLevel(philox_seed, cur_level);
  auto [philox_offset_value, philox_offset_bdim] = unwrapTensorAtLevel(philox_offset, cur_level);
  auto results = batch_rule(grad_out_value, grad_out_bdim, query_value, query_bdim, key_value, key_bdim, value_value, value_bdim, out_value, out_bdim, logsumexp_value, logsumexp_bdim, cum_seq_q_value, cum_seq_q_bdim, cum_seq_k_value, cum_seq_k_bdim, max_q, max_k, dropout_p, is_causal, philox_seed_value, philox_seed_bdim, philox_offset_value, philox_offset_bdim, scale);
  return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level));
}
// Generated vmap plumbing for at::_ops::_scaled_dot_product_flash_attention_for_cpu_backward.
// Defers to the plain op when nothing is batched at the current level; else
// unwraps every tensor (the optional mask only when present), runs the batch
// rule, and re-wraps the three gradient outputs.
template <typename batch_rule_t, batch_rule_t batch_rule>
::std::tuple<at::Tensor,at::Tensor,at::Tensor> _scaled_dot_product_flash_attention_for_cpu_backward_generated_plumbing(const at::Tensor & grad_out, const at::Tensor & query, const at::Tensor & key, const at::Tensor & value, const at::Tensor & out, const at::Tensor & logsumexp, double dropout_p, bool is_causal, const ::std::optional<at::Tensor> & attn_mask, ::std::optional<double> scale) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(grad_out, cur_level) && !isBatchedAtLevel(query, cur_level) && !isBatchedAtLevel(key, cur_level) && !isBatchedAtLevel(value, cur_level) && !isBatchedAtLevel(out, cur_level) && !isBatchedAtLevel(logsumexp, cur_level) && !isBatchedAtLevel(attn_mask, cur_level)) {
    return at::_ops::_scaled_dot_product_flash_attention_for_cpu_backward::call(grad_out, query, key, value, out, logsumexp, dropout_p, is_causal, attn_mask, scale);
  }
  auto [grad_out_value, grad_out_bdim] = unwrapTensorAtLevel(grad_out, cur_level);
  auto [query_value, query_bdim] = unwrapTensorAtLevel(query, cur_level);
  auto [key_value, key_bdim] = unwrapTensorAtLevel(key, cur_level);
  auto [value_value, value_bdim] = unwrapTensorAtLevel(value, cur_level);
  auto [out_value, out_bdim] = unwrapTensorAtLevel(out, cur_level);
  auto [logsumexp_value, logsumexp_bdim] = unwrapTensorAtLevel(logsumexp, cur_level);
  // Optional mask: unwrapped only when present.
  std::optional<Tensor> attn_mask_value;
  std::optional<int64_t> attn_mask_bdim;
  if (attn_mask) {
      std::tie(attn_mask_value, attn_mask_bdim) = unwrapTensorAtLevel(attn_mask.value(), cur_level);
  }
  auto results = batch_rule(grad_out_value, grad_out_bdim, query_value, query_bdim, key_value, key_bdim, value_value, value_bdim, out_value, out_bdim, logsumexp_value, logsumexp_bdim, dropout_p, is_causal, attn_mask_value, attn_mask_bdim, scale);
  return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level));
}
// Generated vmap plumbing for at::_ops::_scaled_dot_product_fused_attention_overrideable_backward.
// grad_input_mask and the SymInt sizes are forwarded as-is; all tensor
// arguments are unwrapped before the batch rule runs, and the four gradient
// outputs are re-wrapped at the current level.
template <typename batch_rule_t, batch_rule_t batch_rule>
::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor> _scaled_dot_product_fused_attention_overrideable_backward_generated_plumbing(const at::Tensor & grad_out, const at::Tensor & query, const at::Tensor & key, const at::Tensor & value, const at::Tensor & attn_bias, ::std::array<bool,4> grad_input_mask, const at::Tensor & out, const at::Tensor & logsumexp, const at::Tensor & cum_seq_q, const at::Tensor & cum_seq_k, c10::SymInt max_q, c10::SymInt max_k, double dropout_p, bool is_causal, const at::Tensor & philox_seed, const at::Tensor & philox_offset, ::std::optional<double> scale) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  // Fast path: nothing batched at this level — call the regular kernel.
  if (!isBatchedAtLevel(grad_out, cur_level) && !isBatchedAtLevel(query, cur_level) && !isBatchedAtLevel(key, cur_level) && !isBatchedAtLevel(value, cur_level) && !isBatchedAtLevel(attn_bias, cur_level) && !isBatchedAtLevel(out, cur_level) && !isBatchedAtLevel(logsumexp, cur_level) && !isBatchedAtLevel(cum_seq_q, cur_level) && !isBatchedAtLevel(cum_seq_k, cur_level) && !isBatchedAtLevel(philox_seed, cur_level) && !isBatchedAtLevel(philox_offset, cur_level)) {
    return at::_ops::_scaled_dot_product_fused_attention_overrideable_backward::call(grad_out, query, key, value, attn_bias, grad_input_mask, out, logsumexp, cum_seq_q, cum_seq_k, max_q, max_k, dropout_p, is_causal, philox_seed, philox_offset, scale);
  }
  auto [grad_out_value, grad_out_bdim] = unwrapTensorAtLevel(grad_out, cur_level);
  auto [query_value, query_bdim] = unwrapTensorAtLevel(query, cur_level);
  auto [key_value, key_bdim] = unwrapTensorAtLevel(key, cur_level);
  auto [value_value, value_bdim] = unwrapTensorAtLevel(value, cur_level);
  auto [attn_bias_value, attn_bias_bdim] = unwrapTensorAtLevel(attn_bias, cur_level);
  auto [out_value, out_bdim] = unwrapTensorAtLevel(out, cur_level);
  auto [logsumexp_value, logsumexp_bdim] = unwrapTensorAtLevel(logsumexp, cur_level);
  auto [cum_seq_q_value, cum_seq_q_bdim] = unwrapTensorAtLevel(cum_seq_q, cur_level);
  auto [cum_seq_k_value, cum_seq_k_bdim] = unwrapTensorAtLevel(cum_seq_k, cur_level);
  auto [philox_seed_value, philox_seed_bdim] = unwrapTensorAtLevel(philox_seed, cur_level);
  auto [philox_offset_value, philox_offset_bdim] = unwrapTensorAtLevel(philox_offset, cur_level);
  auto results = batch_rule(grad_out_value, grad_out_bdim, query_value, query_bdim, key_value, key_bdim, value_value, value_bdim, attn_bias_value, attn_bias_bdim, grad_input_mask, out_value, out_bdim, logsumexp_value, logsumexp_bdim, cum_seq_q_value, cum_seq_q_bdim, cum_seq_k_value, cum_seq_k_bdim, max_q, max_k, dropout_p, is_causal, philox_seed_value, philox_seed_bdim, philox_offset_value, philox_offset_bdim, scale);
  return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level), makeBatched(std::get<6>(results), std::get<7>(results), cur_level));
}
// Generated vmap plumbing for at::_ops::_scaled_dot_product_efficient_attention.
// Defers to the plain op when nothing is batched at the current level; else
// unwraps (value, bdim) pairs (optional bias only when present), runs the
// batch rule, and re-wraps the four tensor outputs.
template <typename batch_rule_t, batch_rule_t batch_rule>
::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor> _scaled_dot_product_efficient_attention_generated_plumbing(const at::Tensor & query, const at::Tensor & key, const at::Tensor & value, const ::std::optional<at::Tensor> & attn_bias, bool compute_log_sumexp, double dropout_p, bool is_causal, ::std::optional<double> scale) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(query, cur_level) && !isBatchedAtLevel(key, cur_level) && !isBatchedAtLevel(value, cur_level) && !isBatchedAtLevel(attn_bias, cur_level)) {
    return at::_ops::_scaled_dot_product_efficient_attention::call(query, key, value, attn_bias, compute_log_sumexp, dropout_p, is_causal, scale);
  }
  auto [query_value, query_bdim] = unwrapTensorAtLevel(query, cur_level);
  auto [key_value, key_bdim] = unwrapTensorAtLevel(key, cur_level);
  auto [value_value, value_bdim] = unwrapTensorAtLevel(value, cur_level);
  // Optional bias: unwrapped only when present.
  std::optional<Tensor> attn_bias_value;
  std::optional<int64_t> attn_bias_bdim;
  if (attn_bias) {
      std::tie(attn_bias_value, attn_bias_bdim) = unwrapTensorAtLevel(attn_bias.value(), cur_level);
  }
  auto results = batch_rule(query_value, query_bdim, key_value, key_bdim, value_value, value_bdim, attn_bias_value, attn_bias_bdim, compute_log_sumexp, dropout_p, is_causal, scale);
  return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level), makeBatched(std::get<6>(results), std::get<7>(results), cur_level));
}
// Generated vmap plumbing for at::_ops::_scaled_dot_product_efficient_attention_backward.
// grad_input_mask and scalar flags are forwarded as-is; all tensor arguments
// are unwrapped before the batch rule runs, and the four gradient outputs are
// re-wrapped at the current level.
template <typename batch_rule_t, batch_rule_t batch_rule>
::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor> _scaled_dot_product_efficient_attention_backward_generated_plumbing(const at::Tensor & grad_out_, const at::Tensor & query, const at::Tensor & key, const at::Tensor & value, const at::Tensor & attn_bias, const at::Tensor & out, const at::Tensor & logsumexp, const at::Tensor & philox_seed, const at::Tensor & philox_offset, double dropout_p, ::std::array<bool,4> grad_input_mask, bool is_causal, ::std::optional<double> scale) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  // Fast path: nothing batched at this level — call the regular kernel.
  if (!isBatchedAtLevel(grad_out_, cur_level) && !isBatchedAtLevel(query, cur_level) && !isBatchedAtLevel(key, cur_level) && !isBatchedAtLevel(value, cur_level) && !isBatchedAtLevel(attn_bias, cur_level) && !isBatchedAtLevel(out, cur_level) && !isBatchedAtLevel(logsumexp, cur_level) && !isBatchedAtLevel(philox_seed, cur_level) && !isBatchedAtLevel(philox_offset, cur_level)) {
    return at::_ops::_scaled_dot_product_efficient_attention_backward::call(grad_out_, query, key, value, attn_bias, out, logsumexp, philox_seed, philox_offset, dropout_p, grad_input_mask, is_causal, scale);
  }
  auto [grad_out__value, grad_out__bdim] = unwrapTensorAtLevel(grad_out_, cur_level);
  auto [query_value, query_bdim] = unwrapTensorAtLevel(query, cur_level);
  auto [key_value, key_bdim] = unwrapTensorAtLevel(key, cur_level);
  auto [value_value, value_bdim] = unwrapTensorAtLevel(value, cur_level);
  auto [attn_bias_value, attn_bias_bdim] = unwrapTensorAtLevel(attn_bias, cur_level);
  auto [out_value, out_bdim] = unwrapTensorAtLevel(out, cur_level);
  auto [logsumexp_value, logsumexp_bdim] = unwrapTensorAtLevel(logsumexp, cur_level);
  auto [philox_seed_value, philox_seed_bdim] = unwrapTensorAtLevel(philox_seed, cur_level);
  auto [philox_offset_value, philox_offset_bdim] = unwrapTensorAtLevel(philox_offset, cur_level);
  auto results = batch_rule(grad_out__value, grad_out__bdim, query_value, query_bdim, key_value, key_bdim, value_value, value_bdim, attn_bias_value, attn_bias_bdim, out_value, out_bdim, logsumexp_value, logsumexp_bdim, philox_seed_value, philox_seed_bdim, philox_offset_value, philox_offset_bdim, dropout_p, grad_input_mask, is_causal, scale);
  return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level), makeBatched(std::get<6>(results), std::get<7>(results), cur_level));
}
// Generated vmap plumbing for at::_ops::_scaled_dot_product_cudnn_attention.
// Defers to the plain op when nothing is batched at the current level; else
// unwraps (value, bdim) pairs (optional bias only when present) and runs the
// batch rule. Tensor outputs are re-wrapped; the SymInt outputs (result tuple
// slots 4 and 5, from batch-rule slots 8 and 9) are returned as-is.
template <typename batch_rule_t, batch_rule_t batch_rule>
::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor,c10::SymInt,c10::SymInt,at::Tensor,at::Tensor,at::Tensor> _scaled_dot_product_cudnn_attention_generated_plumbing(const at::Tensor & query, const at::Tensor & key, const at::Tensor & value, const ::std::optional<at::Tensor> & attn_bias, bool compute_log_sumexp, double dropout_p, bool is_causal, bool return_debug_mask, ::std::optional<double> scale) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(query, cur_level) && !isBatchedAtLevel(key, cur_level) && !isBatchedAtLevel(value, cur_level) && !isBatchedAtLevel(attn_bias, cur_level)) {
    return at::_ops::_scaled_dot_product_cudnn_attention::call(query, key, value, attn_bias, compute_log_sumexp, dropout_p, is_causal, return_debug_mask, scale);
  }
  auto [query_value, query_bdim] = unwrapTensorAtLevel(query, cur_level);
  auto [key_value, key_bdim] = unwrapTensorAtLevel(key, cur_level);
  auto [value_value, value_bdim] = unwrapTensorAtLevel(value, cur_level);
  // Optional bias: unwrapped only when present.
  std::optional<Tensor> attn_bias_value;
  std::optional<int64_t> attn_bias_bdim;
  if (attn_bias) {
      std::tie(attn_bias_value, attn_bias_bdim) = unwrapTensorAtLevel(attn_bias.value(), cur_level);
  }
  auto results = batch_rule(query_value, query_bdim, key_value, key_bdim, value_value, value_bdim, attn_bias_value, attn_bias_bdim, compute_log_sumexp, dropout_p, is_causal, return_debug_mask, scale);
  return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level), makeBatched(std::get<6>(results), std::get<7>(results), cur_level), std::get<8>(results), std::get<9>(results), makeBatched(std::get<10>(results), std::get<11>(results), cur_level), makeBatched(std::get<12>(results), std::get<13>(results), cur_level), makeBatched(std::get<14>(results), std::get<15>(results), cur_level));
}
// Generated vmap plumbing for at::_ops::_scaled_dot_product_cudnn_attention_backward.
// SymInt sizes and scalar flags are forwarded as-is; all tensor arguments are
// unwrapped into (value, bdim) pairs before the batch rule runs, and the three
// gradient outputs are re-wrapped at the current level.
template <typename batch_rule_t, batch_rule_t batch_rule>
::std::tuple<at::Tensor,at::Tensor,at::Tensor> _scaled_dot_product_cudnn_attention_backward_generated_plumbing(const at::Tensor & grad_out, const at::Tensor & query, const at::Tensor & key, const at::Tensor & value, const at::Tensor & out, const at::Tensor & logsumexp, const at::Tensor & philox_seed, const at::Tensor & philox_offset, const at::Tensor & attn_bias, const at::Tensor & cum_seq_q, const at::Tensor & cum_seq_k, c10::SymInt max_q, c10::SymInt max_k, double dropout_p, bool is_causal, ::std::optional<double> scale) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  // Fast path: nothing batched at this level — call the regular kernel.
  if (!isBatchedAtLevel(grad_out, cur_level) && !isBatchedAtLevel(query, cur_level) && !isBatchedAtLevel(key, cur_level) && !isBatchedAtLevel(value, cur_level) && !isBatchedAtLevel(out, cur_level) && !isBatchedAtLevel(logsumexp, cur_level) && !isBatchedAtLevel(philox_seed, cur_level) && !isBatchedAtLevel(philox_offset, cur_level) && !isBatchedAtLevel(attn_bias, cur_level) && !isBatchedAtLevel(cum_seq_q, cur_level) && !isBatchedAtLevel(cum_seq_k, cur_level)) {
    return at::_ops::_scaled_dot_product_cudnn_attention_backward::call(grad_out, query, key, value, out, logsumexp, philox_seed, philox_offset, attn_bias, cum_seq_q, cum_seq_k, max_q, max_k, dropout_p, is_causal, scale);
  }
  auto [grad_out_value, grad_out_bdim] = unwrapTensorAtLevel(grad_out, cur_level);
  auto [query_value, query_bdim] = unwrapTensorAtLevel(query, cur_level);
  auto [key_value, key_bdim] = unwrapTensorAtLevel(key, cur_level);
  auto [value_value, value_bdim] = unwrapTensorAtLevel(value, cur_level);
  auto [out_value, out_bdim] = unwrapTensorAtLevel(out, cur_level);
  auto [logsumexp_value, logsumexp_bdim] = unwrapTensorAtLevel(logsumexp, cur_level);
  auto [philox_seed_value, philox_seed_bdim] = unwrapTensorAtLevel(philox_seed, cur_level);
  auto [philox_offset_value, philox_offset_bdim] = unwrapTensorAtLevel(philox_offset, cur_level);
  auto [attn_bias_value, attn_bias_bdim] = unwrapTensorAtLevel(attn_bias, cur_level);
  auto [cum_seq_q_value, cum_seq_q_bdim] = unwrapTensorAtLevel(cum_seq_q, cur_level);
  auto [cum_seq_k_value, cum_seq_k_bdim] = unwrapTensorAtLevel(cum_seq_k, cur_level);
  auto results = batch_rule(grad_out_value, grad_out_bdim, query_value, query_bdim, key_value, key_bdim, value_value, value_bdim, out_value, out_bdim, logsumexp_value, logsumexp_bdim, philox_seed_value, philox_seed_bdim, philox_offset_value, philox_offset_bdim, attn_bias_value, attn_bias_bdim, cum_seq_q_value, cum_seq_q_bdim, cum_seq_k_value, cum_seq_k_bdim, max_q, max_k, dropout_p, is_causal, scale);
  return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level));
}
// Generated vmap plumbing for aten::_flash_attention_forward.
// Required tensors are unwrapped unconditionally; the optional tensors
// (cum_seq_q/k, seqused_k, alibi_slopes) are only unwrapped when present,
// otherwise an empty (value, bdim) pair is forwarded to `batch_rule`.
template <typename batch_rule_t, batch_rule_t batch_rule>
::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor,at::Tensor> _flash_attention_forward_generated_plumbing(const at::Tensor & query, const at::Tensor & key, const at::Tensor & value, const ::std::optional<at::Tensor> & cum_seq_q, const ::std::optional<at::Tensor> & cum_seq_k, c10::SymInt max_q, c10::SymInt max_k, double dropout_p, bool is_causal, bool return_debug_mask, ::std::optional<double> scale, ::std::optional<c10::SymInt> window_size_left, ::std::optional<c10::SymInt> window_size_right, const ::std::optional<at::Tensor> & seqused_k, const ::std::optional<at::Tensor> & alibi_slopes) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  // Fast path: nothing batched at this level, dispatch to the plain operator.
  if (!isBatchedAtLevel(query, cur_level) && !isBatchedAtLevel(key, cur_level) && !isBatchedAtLevel(value, cur_level) && !isBatchedAtLevel(cum_seq_q, cur_level) && !isBatchedAtLevel(cum_seq_k, cur_level) && !isBatchedAtLevel(seqused_k, cur_level) && !isBatchedAtLevel(alibi_slopes, cur_level)) {
    return at::_ops::_flash_attention_forward::call(query, key, value, cum_seq_q, cum_seq_k, max_q, max_k, dropout_p, is_causal, return_debug_mask, scale, window_size_left, window_size_right, seqused_k, alibi_slopes);
  }
  auto [query_value, query_bdim] = unwrapTensorAtLevel(query, cur_level);
  auto [key_value, key_bdim] = unwrapTensorAtLevel(key, cur_level);
  auto [value_value, value_bdim] = unwrapTensorAtLevel(value, cur_level);
  // Optional tensors: unwrap only when a value was supplied.
  std::optional<Tensor> cum_seq_q_value;
  std::optional<int64_t> cum_seq_q_bdim;
  if (cum_seq_q) {
      std::tie(cum_seq_q_value, cum_seq_q_bdim) = unwrapTensorAtLevel(cum_seq_q.value(), cur_level);
  }
  std::optional<Tensor> cum_seq_k_value;
  std::optional<int64_t> cum_seq_k_bdim;
  if (cum_seq_k) {
      std::tie(cum_seq_k_value, cum_seq_k_bdim) = unwrapTensorAtLevel(cum_seq_k.value(), cur_level);
  }
  std::optional<Tensor> seqused_k_value;
  std::optional<int64_t> seqused_k_bdim;
  if (seqused_k) {
      std::tie(seqused_k_value, seqused_k_bdim) = unwrapTensorAtLevel(seqused_k.value(), cur_level);
  }
  std::optional<Tensor> alibi_slopes_value;
  std::optional<int64_t> alibi_slopes_bdim;
  if (alibi_slopes) {
      std::tie(alibi_slopes_value, alibi_slopes_bdim) = unwrapTensorAtLevel(alibi_slopes.value(), cur_level);
  }
  auto results = batch_rule(query_value, query_bdim, key_value, key_bdim, value_value, value_bdim, cum_seq_q_value, cum_seq_q_bdim, cum_seq_k_value, cum_seq_k_bdim, max_q, max_k, dropout_p, is_causal, return_debug_mask, scale, window_size_left, window_size_right, seqused_k_value, seqused_k_bdim, alibi_slopes_value, alibi_slopes_bdim);
  // Re-wrap all five outputs at the current level.
  return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level), makeBatched(std::get<6>(results), std::get<7>(results), cur_level), makeBatched(std::get<8>(results), std::get<9>(results), cur_level));
}
// Generated vmap plumbing for aten::_flash_attention_backward.
// All ten tensor arguments are required, so each is unwrapped into a
// (value, batch-dim) pair before delegating to `batch_rule`; the three
// gradient outputs are re-wrapped as batched tensors at the current level.
template <typename batch_rule_t, batch_rule_t batch_rule>
::std::tuple<at::Tensor,at::Tensor,at::Tensor> _flash_attention_backward_generated_plumbing(const at::Tensor & grad_out, const at::Tensor & query, const at::Tensor & key, const at::Tensor & value, const at::Tensor & out, const at::Tensor & logsumexp, const at::Tensor & cum_seq_q, const at::Tensor & cum_seq_k, c10::SymInt max_q, c10::SymInt max_k, double dropout_p, bool is_causal, const at::Tensor & rng_state, const at::Tensor & unused, ::std::optional<double> scale, ::std::optional<c10::SymInt> window_size_left, ::std::optional<c10::SymInt> window_size_right) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  // Fast path: no input is batched at this level, call the plain operator.
  if (!isBatchedAtLevel(grad_out, cur_level) && !isBatchedAtLevel(query, cur_level) && !isBatchedAtLevel(key, cur_level) && !isBatchedAtLevel(value, cur_level) && !isBatchedAtLevel(out, cur_level) && !isBatchedAtLevel(logsumexp, cur_level) && !isBatchedAtLevel(cum_seq_q, cur_level) && !isBatchedAtLevel(cum_seq_k, cur_level) && !isBatchedAtLevel(rng_state, cur_level) && !isBatchedAtLevel(unused, cur_level)) {
    return at::_ops::_flash_attention_backward::call(grad_out, query, key, value, out, logsumexp, cum_seq_q, cum_seq_k, max_q, max_k, dropout_p, is_causal, rng_state, unused, scale, window_size_left, window_size_right);
  }
  auto [grad_out_value, grad_out_bdim] = unwrapTensorAtLevel(grad_out, cur_level);
  auto [query_value, query_bdim] = unwrapTensorAtLevel(query, cur_level);
  auto [key_value, key_bdim] = unwrapTensorAtLevel(key, cur_level);
  auto [value_value, value_bdim] = unwrapTensorAtLevel(value, cur_level);
  auto [out_value, out_bdim] = unwrapTensorAtLevel(out, cur_level);
  auto [logsumexp_value, logsumexp_bdim] = unwrapTensorAtLevel(logsumexp, cur_level);
  auto [cum_seq_q_value, cum_seq_q_bdim] = unwrapTensorAtLevel(cum_seq_q, cur_level);
  auto [cum_seq_k_value, cum_seq_k_bdim] = unwrapTensorAtLevel(cum_seq_k, cur_level);
  auto [rng_state_value, rng_state_bdim] = unwrapTensorAtLevel(rng_state, cur_level);
  auto [unused_value, unused_bdim] = unwrapTensorAtLevel(unused, cur_level);
  auto results = batch_rule(grad_out_value, grad_out_bdim, query_value, query_bdim, key_value, key_bdim, value_value, value_bdim, out_value, out_bdim, logsumexp_value, logsumexp_bdim, cum_seq_q_value, cum_seq_q_bdim, cum_seq_k_value, cum_seq_k_bdim, max_q, max_k, dropout_p, is_causal, rng_state_value, rng_state_bdim, unused_value, unused_bdim, scale, window_size_left, window_size_right);
  return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level));
}
// Generated vmap plumbing for aten::_efficient_attention_backward.
// Required tensors are unwrapped unconditionally; the optional tensors
// (bias, cu_seqlens_q, cu_seqlens_k) are unwrapped only when present.
// The four outputs of the batch rule are re-wrapped at the current level.
template <typename batch_rule_t, batch_rule_t batch_rule>
::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor> _efficient_attention_backward_generated_plumbing(const at::Tensor & grad_out_, const at::Tensor & query, const at::Tensor & key, const at::Tensor & value, const ::std::optional<at::Tensor> & bias, const at::Tensor & out, const ::std::optional<at::Tensor> & cu_seqlens_q, const ::std::optional<at::Tensor> & cu_seqlens_k, c10::SymInt max_seqlen_q, c10::SymInt max_seqlen_k, const at::Tensor & logsumexp, double dropout_p, const at::Tensor & philox_seed, const at::Tensor & philox_offset, int64_t custom_mask_type, bool bias_requires_grad, ::std::optional<double> scale, ::std::optional<int64_t> num_splits_key, ::std::optional<int64_t> window_size, bool shared_storage_dqdkdv) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  // Fast path: nothing batched at this level, dispatch to the plain operator.
  if (!isBatchedAtLevel(grad_out_, cur_level) && !isBatchedAtLevel(query, cur_level) && !isBatchedAtLevel(key, cur_level) && !isBatchedAtLevel(value, cur_level) && !isBatchedAtLevel(bias, cur_level) && !isBatchedAtLevel(out, cur_level) && !isBatchedAtLevel(cu_seqlens_q, cur_level) && !isBatchedAtLevel(cu_seqlens_k, cur_level) && !isBatchedAtLevel(logsumexp, cur_level) && !isBatchedAtLevel(philox_seed, cur_level) && !isBatchedAtLevel(philox_offset, cur_level)) {
    return at::_ops::_efficient_attention_backward::call(grad_out_, query, key, value, bias, out, cu_seqlens_q, cu_seqlens_k, max_seqlen_q, max_seqlen_k, logsumexp, dropout_p, philox_seed, philox_offset, custom_mask_type, bias_requires_grad, scale, num_splits_key, window_size, shared_storage_dqdkdv);
  }
  auto [grad_out__value, grad_out__bdim] = unwrapTensorAtLevel(grad_out_, cur_level);
  auto [query_value, query_bdim] = unwrapTensorAtLevel(query, cur_level);
  auto [key_value, key_bdim] = unwrapTensorAtLevel(key, cur_level);
  auto [value_value, value_bdim] = unwrapTensorAtLevel(value, cur_level);
  auto [out_value, out_bdim] = unwrapTensorAtLevel(out, cur_level);
  auto [logsumexp_value, logsumexp_bdim] = unwrapTensorAtLevel(logsumexp, cur_level);
  auto [philox_seed_value, philox_seed_bdim] = unwrapTensorAtLevel(philox_seed, cur_level);
  auto [philox_offset_value, philox_offset_bdim] = unwrapTensorAtLevel(philox_offset, cur_level);
  // Optional tensors: unwrap only when a value was supplied.
  std::optional<Tensor> bias_value;
  std::optional<int64_t> bias_bdim;
  if (bias) {
      std::tie(bias_value, bias_bdim) = unwrapTensorAtLevel(bias.value(), cur_level);
  }
  std::optional<Tensor> cu_seqlens_q_value;
  std::optional<int64_t> cu_seqlens_q_bdim;
  if (cu_seqlens_q) {
      std::tie(cu_seqlens_q_value, cu_seqlens_q_bdim) = unwrapTensorAtLevel(cu_seqlens_q.value(), cur_level);
  }
  std::optional<Tensor> cu_seqlens_k_value;
  std::optional<int64_t> cu_seqlens_k_bdim;
  if (cu_seqlens_k) {
      std::tie(cu_seqlens_k_value, cu_seqlens_k_bdim) = unwrapTensorAtLevel(cu_seqlens_k.value(), cur_level);
  }
  auto results = batch_rule(grad_out__value, grad_out__bdim, query_value, query_bdim, key_value, key_bdim, value_value, value_bdim, bias_value, bias_bdim, out_value, out_bdim, cu_seqlens_q_value, cu_seqlens_q_bdim, cu_seqlens_k_value, cu_seqlens_k_bdim, max_seqlen_q, max_seqlen_k, logsumexp_value, logsumexp_bdim, dropout_p, philox_seed_value, philox_seed_bdim, philox_offset_value, philox_offset_bdim, custom_mask_type, bias_requires_grad, scale, num_splits_key, window_size, shared_storage_dqdkdv);
  return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level), makeBatched(std::get<6>(results), std::get<7>(results), cur_level));
}
25336 template <typename batch_rule_t, batch_rule_t batch_rule>
25337 at::Tensor _triton_scaled_dot_attention_generated_plumbing(const at::Tensor & q, const at::Tensor & k, const at::Tensor & v, double dropout_p) {
25338   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
25339   auto maybe_layer = maybeCurrentDynamicLayer();
25340   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
25341   int64_t cur_level = maybe_layer->layerId();
25342   if (!isBatchedAtLevel(q, cur_level) && !isBatchedAtLevel(k, cur_level) && !isBatchedAtLevel(v, cur_level)) {
25343     return at::_ops::_triton_scaled_dot_attention::call(q, k, v, dropout_p);
25344   }
25345   auto [q_value, q_bdim] = unwrapTensorAtLevel(q, cur_level);
25346   auto [k_value, k_bdim] = unwrapTensorAtLevel(k, cur_level);
25347   auto [v_value, v_bdim] = unwrapTensorAtLevel(v, cur_level);
25348   auto results = batch_rule(q_value, q_bdim, k_value, k_bdim, v_value, v_bdim, dropout_p);
25349   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
25350 }
25351 template <typename batch_rule_t, batch_rule_t batch_rule>
25352 at::Tensor & _fill_mem_eff_dropout_mask__generated_plumbing(at::Tensor & self, double dropout_p, int64_t seed, int64_t offset) {
25353   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
25354   auto maybe_layer = maybeCurrentDynamicLayer();
25355   vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
25356   int64_t cur_level = maybe_layer->layerId();
25357   if (!isBatchedAtLevel(self, cur_level)) {
25358     return at::_ops::_fill_mem_eff_dropout_mask_::call(self, dropout_p, seed, offset);
25359   }
25360   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
25361   batch_rule(self_value, self_bdim, dropout_p, seed, offset);
25362   return self;
25363 }
// Generated vmap plumbing for aten::_triton_multi_head_attention.
// Unwraps all required tensor arguments; the optional attention mask is
// unwrapped only when present. The single output tensor is re-wrapped as
// a batched tensor at the current dynamic layer.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor _triton_multi_head_attention_generated_plumbing(const at::Tensor & query, const at::Tensor & key, const at::Tensor & value, int64_t embed_dim, int64_t num_head, const at::Tensor & qkv_weight, const at::Tensor & qkv_bias, const at::Tensor & proj_weight, const at::Tensor & proj_bias, const ::std::optional<at::Tensor> & mask) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  // Fast path: nothing batched at this level, dispatch to the plain operator.
  if (!isBatchedAtLevel(query, cur_level) && !isBatchedAtLevel(key, cur_level) && !isBatchedAtLevel(value, cur_level) && !isBatchedAtLevel(qkv_weight, cur_level) && !isBatchedAtLevel(qkv_bias, cur_level) && !isBatchedAtLevel(proj_weight, cur_level) && !isBatchedAtLevel(proj_bias, cur_level) && !isBatchedAtLevel(mask, cur_level)) {
    return at::_ops::_triton_multi_head_attention::call(query, key, value, embed_dim, num_head, qkv_weight, qkv_bias, proj_weight, proj_bias, mask);
  }
  auto [query_value, query_bdim] = unwrapTensorAtLevel(query, cur_level);
  auto [key_value, key_bdim] = unwrapTensorAtLevel(key, cur_level);
  auto [value_value, value_bdim] = unwrapTensorAtLevel(value, cur_level);
  auto [qkv_weight_value, qkv_weight_bdim] = unwrapTensorAtLevel(qkv_weight, cur_level);
  auto [qkv_bias_value, qkv_bias_bdim] = unwrapTensorAtLevel(qkv_bias, cur_level);
  auto [proj_weight_value, proj_weight_bdim] = unwrapTensorAtLevel(proj_weight, cur_level);
  auto [proj_bias_value, proj_bias_bdim] = unwrapTensorAtLevel(proj_bias, cur_level);
  // Optional mask: unwrap only when a value was supplied.
  std::optional<Tensor> mask_value;
  std::optional<int64_t> mask_bdim;
  if (mask) {
      std::tie(mask_value, mask_bdim) = unwrapTensorAtLevel(mask.value(), cur_level);
  }
  auto results = batch_rule(query_value, query_bdim, key_value, key_bdim, value_value, value_bdim, embed_dim, num_head, qkv_weight_value, qkv_weight_bdim, qkv_bias_value, qkv_bias_bdim, proj_weight_value, proj_weight_bdim, proj_bias_value, proj_bias_bdim, mask_value, mask_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
25388 template <typename batch_rule_t, batch_rule_t batch_rule>
25389 at::Tensor special_airy_ai_generated_plumbing(const at::Tensor & x) {
25390   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
25391   auto maybe_layer = maybeCurrentDynamicLayer();
25392   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
25393   int64_t cur_level = maybe_layer->layerId();
25394   if (!isBatchedAtLevel(x, cur_level)) {
25395     return at::_ops::special_airy_ai::call(x);
25396   }
25397   auto [x_value, x_bdim] = unwrapTensorAtLevel(x, cur_level);
25398   auto results = batch_rule(x_value, x_bdim);
25399   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
25400 }
25401 template <typename batch_rule_t, batch_rule_t batch_rule>
25402 at::Tensor special_bessel_j0_generated_plumbing(const at::Tensor & self) {
25403   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
25404   auto maybe_layer = maybeCurrentDynamicLayer();
25405   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
25406   int64_t cur_level = maybe_layer->layerId();
25407   if (!isBatchedAtLevel(self, cur_level)) {
25408     return at::_ops::special_bessel_j0::call(self);
25409   }
25410   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
25411   auto results = batch_rule(self_value, self_bdim);
25412   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
25413 }
25414 template <typename batch_rule_t, batch_rule_t batch_rule>
25415 at::Tensor special_bessel_j1_generated_plumbing(const at::Tensor & self) {
25416   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
25417   auto maybe_layer = maybeCurrentDynamicLayer();
25418   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
25419   int64_t cur_level = maybe_layer->layerId();
25420   if (!isBatchedAtLevel(self, cur_level)) {
25421     return at::_ops::special_bessel_j1::call(self);
25422   }
25423   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
25424   auto results = batch_rule(self_value, self_bdim);
25425   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
25426 }
25427 template <typename batch_rule_t, batch_rule_t batch_rule>
25428 at::Tensor special_bessel_y0_generated_plumbing(const at::Tensor & self) {
25429   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
25430   auto maybe_layer = maybeCurrentDynamicLayer();
25431   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
25432   int64_t cur_level = maybe_layer->layerId();
25433   if (!isBatchedAtLevel(self, cur_level)) {
25434     return at::_ops::special_bessel_y0::call(self);
25435   }
25436   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
25437   auto results = batch_rule(self_value, self_bdim);
25438   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
25439 }
25440 template <typename batch_rule_t, batch_rule_t batch_rule>
25441 at::Tensor special_bessel_y1_generated_plumbing(const at::Tensor & self) {
25442   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
25443   auto maybe_layer = maybeCurrentDynamicLayer();
25444   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
25445   int64_t cur_level = maybe_layer->layerId();
25446   if (!isBatchedAtLevel(self, cur_level)) {
25447     return at::_ops::special_bessel_y1::call(self);
25448   }
25449   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
25450   auto results = batch_rule(self_value, self_bdim);
25451   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
25452 }
25453 template <typename batch_rule_t, batch_rule_t batch_rule>
25454 at::Tensor special_chebyshev_polynomial_t_generated_plumbing(const at::Tensor & x, const at::Tensor & n) {
25455   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
25456   auto maybe_layer = maybeCurrentDynamicLayer();
25457   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
25458   int64_t cur_level = maybe_layer->layerId();
25459   if (!isBatchedAtLevel(x, cur_level) && !isBatchedAtLevel(n, cur_level)) {
25460     return at::_ops::special_chebyshev_polynomial_t::call(x, n);
25461   }
25462   auto [x_value, x_bdim] = unwrapTensorAtLevel(x, cur_level);
25463   auto [n_value, n_bdim] = unwrapTensorAtLevel(n, cur_level);
25464   auto results = batch_rule(x_value, x_bdim, n_value, n_bdim);
25465   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
25466 }
25467 template <typename batch_rule_t, batch_rule_t batch_rule>
25468 at::Tensor special_chebyshev_polynomial_t_x_scalar_generated_plumbing(const at::Scalar & x, const at::Tensor & n) {
25469   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
25470   auto maybe_layer = maybeCurrentDynamicLayer();
25471   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
25472   int64_t cur_level = maybe_layer->layerId();
25473   if (!isBatchedAtLevel(n, cur_level)) {
25474     return at::_ops::special_chebyshev_polynomial_t_x_scalar::call(x, n);
25475   }
25476   auto [n_value, n_bdim] = unwrapTensorAtLevel(n, cur_level);
25477   auto results = batch_rule(x, n_value, n_bdim);
25478   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
25479 }
25480 template <typename batch_rule_t, batch_rule_t batch_rule>
25481 at::Tensor special_chebyshev_polynomial_t_n_scalar_generated_plumbing(const at::Tensor & x, const at::Scalar & n) {
25482   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
25483   auto maybe_layer = maybeCurrentDynamicLayer();
25484   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
25485   int64_t cur_level = maybe_layer->layerId();
25486   if (!isBatchedAtLevel(x, cur_level)) {
25487     return at::_ops::special_chebyshev_polynomial_t_n_scalar::call(x, n);
25488   }
25489   auto [x_value, x_bdim] = unwrapTensorAtLevel(x, cur_level);
25490   auto results = batch_rule(x_value, x_bdim, n);
25491   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
25492 }
25493 template <typename batch_rule_t, batch_rule_t batch_rule>
25494 at::Tensor special_chebyshev_polynomial_u_generated_plumbing(const at::Tensor & x, const at::Tensor & n) {
25495   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
25496   auto maybe_layer = maybeCurrentDynamicLayer();
25497   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
25498   int64_t cur_level = maybe_layer->layerId();
25499   if (!isBatchedAtLevel(x, cur_level) && !isBatchedAtLevel(n, cur_level)) {
25500     return at::_ops::special_chebyshev_polynomial_u::call(x, n);
25501   }
25502   auto [x_value, x_bdim] = unwrapTensorAtLevel(x, cur_level);
25503   auto [n_value, n_bdim] = unwrapTensorAtLevel(n, cur_level);
25504   auto results = batch_rule(x_value, x_bdim, n_value, n_bdim);
25505   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
25506 }
25507 template <typename batch_rule_t, batch_rule_t batch_rule>
25508 at::Tensor special_chebyshev_polynomial_u_x_scalar_generated_plumbing(const at::Scalar & x, const at::Tensor & n) {
25509   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
25510   auto maybe_layer = maybeCurrentDynamicLayer();
25511   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
25512   int64_t cur_level = maybe_layer->layerId();
25513   if (!isBatchedAtLevel(n, cur_level)) {
25514     return at::_ops::special_chebyshev_polynomial_u_x_scalar::call(x, n);
25515   }
25516   auto [n_value, n_bdim] = unwrapTensorAtLevel(n, cur_level);
25517   auto results = batch_rule(x, n_value, n_bdim);
25518   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
25519 }
25520 template <typename batch_rule_t, batch_rule_t batch_rule>
25521 at::Tensor special_chebyshev_polynomial_u_n_scalar_generated_plumbing(const at::Tensor & x, const at::Scalar & n) {
25522   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
25523   auto maybe_layer = maybeCurrentDynamicLayer();
25524   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
25525   int64_t cur_level = maybe_layer->layerId();
25526   if (!isBatchedAtLevel(x, cur_level)) {
25527     return at::_ops::special_chebyshev_polynomial_u_n_scalar::call(x, n);
25528   }
25529   auto [x_value, x_bdim] = unwrapTensorAtLevel(x, cur_level);
25530   auto results = batch_rule(x_value, x_bdim, n);
25531   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
25532 }
25533 template <typename batch_rule_t, batch_rule_t batch_rule>
25534 at::Tensor special_chebyshev_polynomial_v_generated_plumbing(const at::Tensor & x, const at::Tensor & n) {
25535   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
25536   auto maybe_layer = maybeCurrentDynamicLayer();
25537   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
25538   int64_t cur_level = maybe_layer->layerId();
25539   if (!isBatchedAtLevel(x, cur_level) && !isBatchedAtLevel(n, cur_level)) {
25540     return at::_ops::special_chebyshev_polynomial_v::call(x, n);
25541   }
25542   auto [x_value, x_bdim] = unwrapTensorAtLevel(x, cur_level);
25543   auto [n_value, n_bdim] = unwrapTensorAtLevel(n, cur_level);
25544   auto results = batch_rule(x_value, x_bdim, n_value, n_bdim);
25545   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
25546 }
25547 template <typename batch_rule_t, batch_rule_t batch_rule>
25548 at::Tensor special_chebyshev_polynomial_v_x_scalar_generated_plumbing(const at::Scalar & x, const at::Tensor & n) {
25549   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
25550   auto maybe_layer = maybeCurrentDynamicLayer();
25551   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
25552   int64_t cur_level = maybe_layer->layerId();
25553   if (!isBatchedAtLevel(n, cur_level)) {
25554     return at::_ops::special_chebyshev_polynomial_v_x_scalar::call(x, n);
25555   }
25556   auto [n_value, n_bdim] = unwrapTensorAtLevel(n, cur_level);
25557   auto results = batch_rule(x, n_value, n_bdim);
25558   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
25559 }
25560 template <typename batch_rule_t, batch_rule_t batch_rule>
25561 at::Tensor special_chebyshev_polynomial_v_n_scalar_generated_plumbing(const at::Tensor & x, const at::Scalar & n) {
25562   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
25563   auto maybe_layer = maybeCurrentDynamicLayer();
25564   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
25565   int64_t cur_level = maybe_layer->layerId();
25566   if (!isBatchedAtLevel(x, cur_level)) {
25567     return at::_ops::special_chebyshev_polynomial_v_n_scalar::call(x, n);
25568   }
25569   auto [x_value, x_bdim] = unwrapTensorAtLevel(x, cur_level);
25570   auto results = batch_rule(x_value, x_bdim, n);
25571   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
25572 }
25573 template <typename batch_rule_t, batch_rule_t batch_rule>
25574 at::Tensor special_chebyshev_polynomial_w_generated_plumbing(const at::Tensor & x, const at::Tensor & n) {
25575   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
25576   auto maybe_layer = maybeCurrentDynamicLayer();
25577   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
25578   int64_t cur_level = maybe_layer->layerId();
25579   if (!isBatchedAtLevel(x, cur_level) && !isBatchedAtLevel(n, cur_level)) {
25580     return at::_ops::special_chebyshev_polynomial_w::call(x, n);
25581   }
25582   auto [x_value, x_bdim] = unwrapTensorAtLevel(x, cur_level);
25583   auto [n_value, n_bdim] = unwrapTensorAtLevel(n, cur_level);
25584   auto results = batch_rule(x_value, x_bdim, n_value, n_bdim);
25585   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
25586 }
25587 template <typename batch_rule_t, batch_rule_t batch_rule>
25588 at::Tensor special_chebyshev_polynomial_w_x_scalar_generated_plumbing(const at::Scalar & x, const at::Tensor & n) {
25589   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
25590   auto maybe_layer = maybeCurrentDynamicLayer();
25591   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
25592   int64_t cur_level = maybe_layer->layerId();
25593   if (!isBatchedAtLevel(n, cur_level)) {
25594     return at::_ops::special_chebyshev_polynomial_w_x_scalar::call(x, n);
25595   }
25596   auto [n_value, n_bdim] = unwrapTensorAtLevel(n, cur_level);
25597   auto results = batch_rule(x, n_value, n_bdim);
25598   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
25599 }
25600 template <typename batch_rule_t, batch_rule_t batch_rule>
25601 at::Tensor special_chebyshev_polynomial_w_n_scalar_generated_plumbing(const at::Tensor & x, const at::Scalar & n) {
25602   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
25603   auto maybe_layer = maybeCurrentDynamicLayer();
25604   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
25605   int64_t cur_level = maybe_layer->layerId();
25606   if (!isBatchedAtLevel(x, cur_level)) {
25607     return at::_ops::special_chebyshev_polynomial_w_n_scalar::call(x, n);
25608   }
25609   auto [x_value, x_bdim] = unwrapTensorAtLevel(x, cur_level);
25610   auto results = batch_rule(x_value, x_bdim, n);
25611   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
25612 }
25613 template <typename batch_rule_t, batch_rule_t batch_rule>
25614 at::Tensor special_hermite_polynomial_h_generated_plumbing(const at::Tensor & x, const at::Tensor & n) {
25615   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
25616   auto maybe_layer = maybeCurrentDynamicLayer();
25617   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
25618   int64_t cur_level = maybe_layer->layerId();
25619   if (!isBatchedAtLevel(x, cur_level) && !isBatchedAtLevel(n, cur_level)) {
25620     return at::_ops::special_hermite_polynomial_h::call(x, n);
25621   }
25622   auto [x_value, x_bdim] = unwrapTensorAtLevel(x, cur_level);
25623   auto [n_value, n_bdim] = unwrapTensorAtLevel(n, cur_level);
25624   auto results = batch_rule(x_value, x_bdim, n_value, n_bdim);
25625   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
25626 }
25627 template <typename batch_rule_t, batch_rule_t batch_rule>
25628 at::Tensor special_hermite_polynomial_h_x_scalar_generated_plumbing(const at::Scalar & x, const at::Tensor & n) {
25629   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
25630   auto maybe_layer = maybeCurrentDynamicLayer();
25631   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
25632   int64_t cur_level = maybe_layer->layerId();
25633   if (!isBatchedAtLevel(n, cur_level)) {
25634     return at::_ops::special_hermite_polynomial_h_x_scalar::call(x, n);
25635   }
25636   auto [n_value, n_bdim] = unwrapTensorAtLevel(n, cur_level);
25637   auto results = batch_rule(x, n_value, n_bdim);
25638   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
25639 }
// Generated vmap plumbing for aten::special_hermite_polynomial_h.n_scalar.
// If `x` is not batched at the current functorch level, falls through to the
// plain op; otherwise unwraps `x` into (value, bdim), applies batch_rule, and
// re-wraps the result as a BatchedTensor at this level.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor special_hermite_polynomial_h_n_scalar_generated_plumbing(const at::Tensor & x, const at::Scalar & n) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(x, cur_level)) {
    return at::_ops::special_hermite_polynomial_h_n_scalar::call(x, n);
  }
  auto [x_value, x_bdim] = unwrapTensorAtLevel(x, cur_level);
  auto results = batch_rule(x_value, x_bdim, n);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// Generated vmap plumbing for aten::special_hermite_polynomial_he.
// Falls through to the plain op when neither `x` nor `n` is batched at the
// current functorch level; otherwise unwraps both tensors into
// (value, bdim) pairs, applies batch_rule, and re-wraps the result.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor special_hermite_polynomial_he_generated_plumbing(const at::Tensor & x, const at::Tensor & n) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(x, cur_level) && !isBatchedAtLevel(n, cur_level)) {
    return at::_ops::special_hermite_polynomial_he::call(x, n);
  }
  auto [x_value, x_bdim] = unwrapTensorAtLevel(x, cur_level);
  auto [n_value, n_bdim] = unwrapTensorAtLevel(n, cur_level);
  auto results = batch_rule(x_value, x_bdim, n_value, n_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// Generated vmap plumbing for aten::special_hermite_polynomial_he.x_scalar.
// If `n` is not batched at the current functorch level, falls through to the
// plain op; otherwise unwraps `n`, applies batch_rule, and re-wraps the result.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor special_hermite_polynomial_he_x_scalar_generated_plumbing(const at::Scalar & x, const at::Tensor & n) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(n, cur_level)) {
    return at::_ops::special_hermite_polynomial_he_x_scalar::call(x, n);
  }
  auto [n_value, n_bdim] = unwrapTensorAtLevel(n, cur_level);
  auto results = batch_rule(x, n_value, n_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// Generated vmap plumbing for aten::special_hermite_polynomial_he.n_scalar.
// If `x` is not batched at the current functorch level, falls through to the
// plain op; otherwise unwraps `x`, applies batch_rule, and re-wraps the result.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor special_hermite_polynomial_he_n_scalar_generated_plumbing(const at::Tensor & x, const at::Scalar & n) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(x, cur_level)) {
    return at::_ops::special_hermite_polynomial_he_n_scalar::call(x, n);
  }
  auto [x_value, x_bdim] = unwrapTensorAtLevel(x, cur_level);
  auto results = batch_rule(x_value, x_bdim, n);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// Generated vmap plumbing for aten::special_laguerre_polynomial_l.
// Falls through to the plain op when neither `x` nor `n` is batched at the
// current functorch level; otherwise unwraps both tensors, applies
// batch_rule, and re-wraps the result.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor special_laguerre_polynomial_l_generated_plumbing(const at::Tensor & x, const at::Tensor & n) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(x, cur_level) && !isBatchedAtLevel(n, cur_level)) {
    return at::_ops::special_laguerre_polynomial_l::call(x, n);
  }
  auto [x_value, x_bdim] = unwrapTensorAtLevel(x, cur_level);
  auto [n_value, n_bdim] = unwrapTensorAtLevel(n, cur_level);
  auto results = batch_rule(x_value, x_bdim, n_value, n_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// Generated vmap plumbing for aten::special_laguerre_polynomial_l.x_scalar.
// If `n` is not batched at the current functorch level, falls through to the
// plain op; otherwise unwraps `n`, applies batch_rule, and re-wraps the result.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor special_laguerre_polynomial_l_x_scalar_generated_plumbing(const at::Scalar & x, const at::Tensor & n) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(n, cur_level)) {
    return at::_ops::special_laguerre_polynomial_l_x_scalar::call(x, n);
  }
  auto [n_value, n_bdim] = unwrapTensorAtLevel(n, cur_level);
  auto results = batch_rule(x, n_value, n_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// Generated vmap plumbing for aten::special_laguerre_polynomial_l.n_scalar.
// If `x` is not batched at the current functorch level, falls through to the
// plain op; otherwise unwraps `x`, applies batch_rule, and re-wraps the result.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor special_laguerre_polynomial_l_n_scalar_generated_plumbing(const at::Tensor & x, const at::Scalar & n) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(x, cur_level)) {
    return at::_ops::special_laguerre_polynomial_l_n_scalar::call(x, n);
  }
  auto [x_value, x_bdim] = unwrapTensorAtLevel(x, cur_level);
  auto results = batch_rule(x_value, x_bdim, n);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// Generated vmap plumbing for aten::special_legendre_polynomial_p.
// Falls through to the plain op when neither `x` nor `n` is batched at the
// current functorch level; otherwise unwraps both tensors, applies
// batch_rule, and re-wraps the result.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor special_legendre_polynomial_p_generated_plumbing(const at::Tensor & x, const at::Tensor & n) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(x, cur_level) && !isBatchedAtLevel(n, cur_level)) {
    return at::_ops::special_legendre_polynomial_p::call(x, n);
  }
  auto [x_value, x_bdim] = unwrapTensorAtLevel(x, cur_level);
  auto [n_value, n_bdim] = unwrapTensorAtLevel(n, cur_level);
  auto results = batch_rule(x_value, x_bdim, n_value, n_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// Generated vmap plumbing for aten::special_legendre_polynomial_p.x_scalar.
// If `n` is not batched at the current functorch level, falls through to the
// plain op; otherwise unwraps `n`, applies batch_rule, and re-wraps the result.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor special_legendre_polynomial_p_x_scalar_generated_plumbing(const at::Scalar & x, const at::Tensor & n) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(n, cur_level)) {
    return at::_ops::special_legendre_polynomial_p_x_scalar::call(x, n);
  }
  auto [n_value, n_bdim] = unwrapTensorAtLevel(n, cur_level);
  auto results = batch_rule(x, n_value, n_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// Generated vmap plumbing for aten::special_legendre_polynomial_p.n_scalar.
// If `x` is not batched at the current functorch level, falls through to the
// plain op; otherwise unwraps `x`, applies batch_rule, and re-wraps the result.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor special_legendre_polynomial_p_n_scalar_generated_plumbing(const at::Tensor & x, const at::Scalar & n) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(x, cur_level)) {
    return at::_ops::special_legendre_polynomial_p_n_scalar::call(x, n);
  }
  auto [x_value, x_bdim] = unwrapTensorAtLevel(x, cur_level);
  auto results = batch_rule(x_value, x_bdim, n);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// Generated vmap plumbing for unary aten::special_modified_bessel_i0.
// If `self` is not batched at the current functorch level, falls through to
// the plain op; otherwise unwraps, applies batch_rule, and re-wraps.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor special_modified_bessel_i0_generated_plumbing(const at::Tensor & self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::special_modified_bessel_i0::call(self);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// Generated vmap plumbing for unary aten::special_modified_bessel_i1.
// If `self` is not batched at the current functorch level, falls through to
// the plain op; otherwise unwraps, applies batch_rule, and re-wraps.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor special_modified_bessel_i1_generated_plumbing(const at::Tensor & self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::special_modified_bessel_i1::call(self);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// Generated vmap plumbing for unary aten::special_modified_bessel_k0.
// If `self` is not batched at the current functorch level, falls through to
// the plain op; otherwise unwraps, applies batch_rule, and re-wraps.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor special_modified_bessel_k0_generated_plumbing(const at::Tensor & self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::special_modified_bessel_k0::call(self);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// Generated vmap plumbing for unary aten::special_modified_bessel_k1.
// If `self` is not batched at the current functorch level, falls through to
// the plain op; otherwise unwraps, applies batch_rule, and re-wraps.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor special_modified_bessel_k1_generated_plumbing(const at::Tensor & self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::special_modified_bessel_k1::call(self);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// Generated vmap plumbing for unary aten::special_scaled_modified_bessel_k0.
// If `x` is not batched at the current functorch level, falls through to the
// plain op; otherwise unwraps, applies batch_rule, and re-wraps.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor special_scaled_modified_bessel_k0_generated_plumbing(const at::Tensor & x) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(x, cur_level)) {
    return at::_ops::special_scaled_modified_bessel_k0::call(x);
  }
  auto [x_value, x_bdim] = unwrapTensorAtLevel(x, cur_level);
  auto results = batch_rule(x_value, x_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// Generated vmap plumbing for unary aten::special_scaled_modified_bessel_k1.
// If `x` is not batched at the current functorch level, falls through to the
// plain op; otherwise unwraps, applies batch_rule, and re-wraps.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor special_scaled_modified_bessel_k1_generated_plumbing(const at::Tensor & x) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(x, cur_level)) {
    return at::_ops::special_scaled_modified_bessel_k1::call(x);
  }
  auto [x_value, x_bdim] = unwrapTensorAtLevel(x, cur_level);
  auto results = batch_rule(x_value, x_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// Generated vmap plumbing for aten::special_shifted_chebyshev_polynomial_t.
// Falls through to the plain op when neither `x` nor `n` is batched at the
// current functorch level; otherwise unwraps both tensors, applies
// batch_rule, and re-wraps the result.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor special_shifted_chebyshev_polynomial_t_generated_plumbing(const at::Tensor & x, const at::Tensor & n) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(x, cur_level) && !isBatchedAtLevel(n, cur_level)) {
    return at::_ops::special_shifted_chebyshev_polynomial_t::call(x, n);
  }
  auto [x_value, x_bdim] = unwrapTensorAtLevel(x, cur_level);
  auto [n_value, n_bdim] = unwrapTensorAtLevel(n, cur_level);
  auto results = batch_rule(x_value, x_bdim, n_value, n_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// Generated vmap plumbing for aten::special_shifted_chebyshev_polynomial_t.x_scalar.
// If `n` is not batched at the current functorch level, falls through to the
// plain op; otherwise unwraps `n`, applies batch_rule, and re-wraps the result.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor special_shifted_chebyshev_polynomial_t_x_scalar_generated_plumbing(const at::Scalar & x, const at::Tensor & n) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(n, cur_level)) {
    return at::_ops::special_shifted_chebyshev_polynomial_t_x_scalar::call(x, n);
  }
  auto [n_value, n_bdim] = unwrapTensorAtLevel(n, cur_level);
  auto results = batch_rule(x, n_value, n_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// Generated vmap plumbing for aten::special_shifted_chebyshev_polynomial_t.n_scalar.
// If `x` is not batched at the current functorch level, falls through to the
// plain op; otherwise unwraps `x`, applies batch_rule, and re-wraps the result.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor special_shifted_chebyshev_polynomial_t_n_scalar_generated_plumbing(const at::Tensor & x, const at::Scalar & n) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(x, cur_level)) {
    return at::_ops::special_shifted_chebyshev_polynomial_t_n_scalar::call(x, n);
  }
  auto [x_value, x_bdim] = unwrapTensorAtLevel(x, cur_level);
  auto results = batch_rule(x_value, x_bdim, n);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// Generated vmap plumbing for aten::special_shifted_chebyshev_polynomial_u.
// Falls through to the plain op when neither `x` nor `n` is batched at the
// current functorch level; otherwise unwraps both tensors, applies
// batch_rule, and re-wraps the result.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor special_shifted_chebyshev_polynomial_u_generated_plumbing(const at::Tensor & x, const at::Tensor & n) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(x, cur_level) && !isBatchedAtLevel(n, cur_level)) {
    return at::_ops::special_shifted_chebyshev_polynomial_u::call(x, n);
  }
  auto [x_value, x_bdim] = unwrapTensorAtLevel(x, cur_level);
  auto [n_value, n_bdim] = unwrapTensorAtLevel(n, cur_level);
  auto results = batch_rule(x_value, x_bdim, n_value, n_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// Generated vmap plumbing for aten::special_shifted_chebyshev_polynomial_u.x_scalar.
// If `n` is not batched at the current functorch level, falls through to the
// plain op; otherwise unwraps `n`, applies batch_rule, and re-wraps the result.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor special_shifted_chebyshev_polynomial_u_x_scalar_generated_plumbing(const at::Scalar & x, const at::Tensor & n) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(n, cur_level)) {
    return at::_ops::special_shifted_chebyshev_polynomial_u_x_scalar::call(x, n);
  }
  auto [n_value, n_bdim] = unwrapTensorAtLevel(n, cur_level);
  auto results = batch_rule(x, n_value, n_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// Generated vmap plumbing for aten::special_shifted_chebyshev_polynomial_u.n_scalar.
// If `x` is not batched at the current functorch level, falls through to the
// plain op; otherwise unwraps `x`, applies batch_rule, and re-wraps the result.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor special_shifted_chebyshev_polynomial_u_n_scalar_generated_plumbing(const at::Tensor & x, const at::Scalar & n) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(x, cur_level)) {
    return at::_ops::special_shifted_chebyshev_polynomial_u_n_scalar::call(x, n);
  }
  auto [x_value, x_bdim] = unwrapTensorAtLevel(x, cur_level);
  auto results = batch_rule(x_value, x_bdim, n);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// Generated vmap plumbing for aten::special_shifted_chebyshev_polynomial_v.
// Falls through to the plain op when neither `x` nor `n` is batched at the
// current functorch level; otherwise unwraps both tensors, applies
// batch_rule, and re-wraps the result.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor special_shifted_chebyshev_polynomial_v_generated_plumbing(const at::Tensor & x, const at::Tensor & n) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(x, cur_level) && !isBatchedAtLevel(n, cur_level)) {
    return at::_ops::special_shifted_chebyshev_polynomial_v::call(x, n);
  }
  auto [x_value, x_bdim] = unwrapTensorAtLevel(x, cur_level);
  auto [n_value, n_bdim] = unwrapTensorAtLevel(n, cur_level);
  auto results = batch_rule(x_value, x_bdim, n_value, n_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// Generated vmap plumbing for aten::special_shifted_chebyshev_polynomial_v.x_scalar.
// If `n` is not batched at the current functorch level, falls through to the
// plain op; otherwise unwraps `n`, applies batch_rule, and re-wraps the result.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor special_shifted_chebyshev_polynomial_v_x_scalar_generated_plumbing(const at::Scalar & x, const at::Tensor & n) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(n, cur_level)) {
    return at::_ops::special_shifted_chebyshev_polynomial_v_x_scalar::call(x, n);
  }
  auto [n_value, n_bdim] = unwrapTensorAtLevel(n, cur_level);
  auto results = batch_rule(x, n_value, n_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// Generated vmap plumbing for aten::special_shifted_chebyshev_polynomial_v.n_scalar.
// If `x` is not batched at the current functorch level, falls through to the
// plain op; otherwise unwraps `x`, applies batch_rule, and re-wraps the result.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor special_shifted_chebyshev_polynomial_v_n_scalar_generated_plumbing(const at::Tensor & x, const at::Scalar & n) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(x, cur_level)) {
    return at::_ops::special_shifted_chebyshev_polynomial_v_n_scalar::call(x, n);
  }
  auto [x_value, x_bdim] = unwrapTensorAtLevel(x, cur_level);
  auto results = batch_rule(x_value, x_bdim, n);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// Generated vmap plumbing for aten::special_shifted_chebyshev_polynomial_w.
// Falls through to the plain op when neither `x` nor `n` is batched at the
// current functorch level; otherwise unwraps both tensors, applies
// batch_rule, and re-wraps the result.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor special_shifted_chebyshev_polynomial_w_generated_plumbing(const at::Tensor & x, const at::Tensor & n) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(x, cur_level) && !isBatchedAtLevel(n, cur_level)) {
    return at::_ops::special_shifted_chebyshev_polynomial_w::call(x, n);
  }
  auto [x_value, x_bdim] = unwrapTensorAtLevel(x, cur_level);
  auto [n_value, n_bdim] = unwrapTensorAtLevel(n, cur_level);
  auto results = batch_rule(x_value, x_bdim, n_value, n_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// Generated vmap plumbing for aten::special_shifted_chebyshev_polynomial_w.x_scalar.
// If `n` is not batched at the current functorch level, falls through to the
// plain op; otherwise unwraps `n`, applies batch_rule, and re-wraps the result.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor special_shifted_chebyshev_polynomial_w_x_scalar_generated_plumbing(const at::Scalar & x, const at::Tensor & n) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(n, cur_level)) {
    return at::_ops::special_shifted_chebyshev_polynomial_w_x_scalar::call(x, n);
  }
  auto [n_value, n_bdim] = unwrapTensorAtLevel(n, cur_level);
  auto results = batch_rule(x, n_value, n_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// Generated vmap plumbing for aten::special_shifted_chebyshev_polynomial_w.n_scalar.
// If `x` is not batched at the current functorch level, falls through to the
// plain op; otherwise unwraps `x`, applies batch_rule, and re-wraps the result.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor special_shifted_chebyshev_polynomial_w_n_scalar_generated_plumbing(const at::Tensor & x, const at::Scalar & n) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(x, cur_level)) {
    return at::_ops::special_shifted_chebyshev_polynomial_w_n_scalar::call(x, n);
  }
  auto [x_value, x_bdim] = unwrapTensorAtLevel(x, cur_level);
  auto results = batch_rule(x_value, x_bdim, n);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// Generated vmap plumbing for unary aten::special_spherical_bessel_j0.
// If `x` is not batched at the current functorch level, falls through to the
// plain op; otherwise unwraps, applies batch_rule, and re-wraps.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor special_spherical_bessel_j0_generated_plumbing(const at::Tensor & x) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(x, cur_level)) {
    return at::_ops::special_spherical_bessel_j0::call(x);
  }
  auto [x_value, x_bdim] = unwrapTensorAtLevel(x, cur_level);
  auto results = batch_rule(x_value, x_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// Generated vmap plumbing for aten::_foobar (codegen test op). If `self` is
// not batched at the current functorch level, falls through to the plain op;
// otherwise unwraps `self`, passes the bool flags through unchanged to
// batch_rule, and re-wraps the result.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor _foobar_generated_plumbing(const at::Tensor & self, bool arg1, bool arg2, bool arg3) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::_foobar::call(self, arg1, arg2, arg3);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, arg1, arg2, arg3);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// Generated vmap plumbing for in-place aten::_fused_adam_ (no return value,
// hence the "gen_vmap_plumbing_no_returns" escape tag). Falls through to the
// plain op when none of the tensor lists or optional tensors are batched at
// the current functorch level; otherwise unwraps the optional grad_scale /
// found_inf tensors and forwards everything to batch_rule. The TensorList
// arguments are passed through to batch_rule without per-element unwrapping.
template <typename batch_rule_t, batch_rule_t batch_rule>
void _fused_adam__generated_plumbing(at::TensorList self, at::TensorList grads, at::TensorList exp_avgs, at::TensorList exp_avg_sqs, at::TensorList max_exp_avg_sqs, at::TensorList state_steps, double lr, double beta1, double beta2, double weight_decay, double eps, bool amsgrad, bool maximize, const ::std::optional<at::Tensor> & grad_scale, const ::std::optional<at::Tensor> & found_inf) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(grads, cur_level) && !isBatchedAtLevel(exp_avgs, cur_level) && !isBatchedAtLevel(exp_avg_sqs, cur_level) && !isBatchedAtLevel(max_exp_avg_sqs, cur_level) && !isBatchedAtLevel(state_steps, cur_level) && !isBatchedAtLevel(grad_scale, cur_level) && !isBatchedAtLevel(found_inf, cur_level)) {
    return at::_ops::_fused_adam_::call(self, grads, exp_avgs, exp_avg_sqs, max_exp_avg_sqs, state_steps, lr, beta1, beta2, weight_decay, eps, amsgrad, maximize, grad_scale, found_inf);
  }
  // Optional tensors unwrap to (nullopt, nullopt) when absent.
  std::optional<Tensor> grad_scale_value;
  std::optional<int64_t> grad_scale_bdim;
  if (grad_scale) {
      std::tie(grad_scale_value, grad_scale_bdim) = unwrapTensorAtLevel(grad_scale.value(), cur_level);
  }
  std::optional<Tensor> found_inf_value;
  std::optional<int64_t> found_inf_bdim;
  if (found_inf) {
      std::tie(found_inf_value, found_inf_bdim) = unwrapTensorAtLevel(found_inf.value(), cur_level);
  }
  batch_rule(self, grads, exp_avgs, exp_avg_sqs, max_exp_avg_sqs, state_steps, lr, beta1, beta2, weight_decay, eps, amsgrad, maximize, grad_scale_value, grad_scale_bdim, found_inf_value, found_inf_bdim);
}
// Generated vmap plumbing for in-place aten::_fused_adam_.tensor_lr (no
// return value). Same shape as the double-lr overload, except `lr` is a
// Tensor: it participates in the batched-at-level check and is unwrapped
// into (lr_value, lr_bdim) before dispatching to batch_rule.
template <typename batch_rule_t, batch_rule_t batch_rule>
void _fused_adam__tensor_lr_generated_plumbing(at::TensorList self, at::TensorList grads, at::TensorList exp_avgs, at::TensorList exp_avg_sqs, at::TensorList max_exp_avg_sqs, at::TensorList state_steps, const at::Tensor & lr, double beta1, double beta2, double weight_decay, double eps, bool amsgrad, bool maximize, const ::std::optional<at::Tensor> & grad_scale, const ::std::optional<at::Tensor> & found_inf) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(grads, cur_level) && !isBatchedAtLevel(exp_avgs, cur_level) && !isBatchedAtLevel(exp_avg_sqs, cur_level) && !isBatchedAtLevel(max_exp_avg_sqs, cur_level) && !isBatchedAtLevel(state_steps, cur_level) && !isBatchedAtLevel(lr, cur_level) && !isBatchedAtLevel(grad_scale, cur_level) && !isBatchedAtLevel(found_inf, cur_level)) {
    return at::_ops::_fused_adam__tensor_lr::call(self, grads, exp_avgs, exp_avg_sqs, max_exp_avg_sqs, state_steps, lr, beta1, beta2, weight_decay, eps, amsgrad, maximize, grad_scale, found_inf);
  }
  auto [lr_value, lr_bdim] = unwrapTensorAtLevel(lr, cur_level);
  // Optional tensors unwrap to (nullopt, nullopt) when absent.
  std::optional<Tensor> grad_scale_value;
  std::optional<int64_t> grad_scale_bdim;
  if (grad_scale) {
      std::tie(grad_scale_value, grad_scale_bdim) = unwrapTensorAtLevel(grad_scale.value(), cur_level);
  }
  std::optional<Tensor> found_inf_value;
  std::optional<int64_t> found_inf_bdim;
  if (found_inf) {
      std::tie(found_inf_value, found_inf_bdim) = unwrapTensorAtLevel(found_inf.value(), cur_level);
  }
  batch_rule(self, grads, exp_avgs, exp_avg_sqs, max_exp_avg_sqs, state_steps, lr_value, lr_bdim, beta1, beta2, weight_decay, eps, amsgrad, maximize, grad_scale_value, grad_scale_bdim, found_inf_value, found_inf_bdim);
}
// Generated vmap plumbing for in-place aten::_fused_adamw_ (no return
// value). Falls through to the plain op when none of the tensor lists or
// optional tensors are batched at the current functorch level; otherwise
// unwraps the optional grad_scale / found_inf tensors and forwards
// everything to batch_rule.
template <typename batch_rule_t, batch_rule_t batch_rule>
void _fused_adamw__generated_plumbing(at::TensorList self, at::TensorList grads, at::TensorList exp_avgs, at::TensorList exp_avg_sqs, at::TensorList max_exp_avg_sqs, at::TensorList state_steps, double lr, double beta1, double beta2, double weight_decay, double eps, bool amsgrad, bool maximize, const ::std::optional<at::Tensor> & grad_scale, const ::std::optional<at::Tensor> & found_inf) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(grads, cur_level) && !isBatchedAtLevel(exp_avgs, cur_level) && !isBatchedAtLevel(exp_avg_sqs, cur_level) && !isBatchedAtLevel(max_exp_avg_sqs, cur_level) && !isBatchedAtLevel(state_steps, cur_level) && !isBatchedAtLevel(grad_scale, cur_level) && !isBatchedAtLevel(found_inf, cur_level)) {
    return at::_ops::_fused_adamw_::call(self, grads, exp_avgs, exp_avg_sqs, max_exp_avg_sqs, state_steps, lr, beta1, beta2, weight_decay, eps, amsgrad, maximize, grad_scale, found_inf);
  }
  // Optional tensors unwrap to (nullopt, nullopt) when absent.
  std::optional<Tensor> grad_scale_value;
  std::optional<int64_t> grad_scale_bdim;
  if (grad_scale) {
      std::tie(grad_scale_value, grad_scale_bdim) = unwrapTensorAtLevel(grad_scale.value(), cur_level);
  }
  std::optional<Tensor> found_inf_value;
  std::optional<int64_t> found_inf_bdim;
  if (found_inf) {
      std::tie(found_inf_value, found_inf_bdim) = unwrapTensorAtLevel(found_inf.value(), cur_level);
  }
  batch_rule(self, grads, exp_avgs, exp_avg_sqs, max_exp_avg_sqs, state_steps, lr, beta1, beta2, weight_decay, eps, amsgrad, maximize, grad_scale_value, grad_scale_bdim, found_inf_value, found_inf_bdim);
}
// Generated vmap plumbing for in-place aten::_fused_adamw_.tensor_lr (no
// return value). Same shape as the double-lr overload, except `lr` is a
// Tensor: it participates in the batched-at-level check and is unwrapped
// into (lr_value, lr_bdim) before dispatching to batch_rule.
template <typename batch_rule_t, batch_rule_t batch_rule>
void _fused_adamw__tensor_lr_generated_plumbing(at::TensorList self, at::TensorList grads, at::TensorList exp_avgs, at::TensorList exp_avg_sqs, at::TensorList max_exp_avg_sqs, at::TensorList state_steps, const at::Tensor & lr, double beta1, double beta2, double weight_decay, double eps, bool amsgrad, bool maximize, const ::std::optional<at::Tensor> & grad_scale, const ::std::optional<at::Tensor> & found_inf) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(grads, cur_level) && !isBatchedAtLevel(exp_avgs, cur_level) && !isBatchedAtLevel(exp_avg_sqs, cur_level) && !isBatchedAtLevel(max_exp_avg_sqs, cur_level) && !isBatchedAtLevel(state_steps, cur_level) && !isBatchedAtLevel(lr, cur_level) && !isBatchedAtLevel(grad_scale, cur_level) && !isBatchedAtLevel(found_inf, cur_level)) {
    return at::_ops::_fused_adamw__tensor_lr::call(self, grads, exp_avgs, exp_avg_sqs, max_exp_avg_sqs, state_steps, lr, beta1, beta2, weight_decay, eps, amsgrad, maximize, grad_scale, found_inf);
  }
  auto [lr_value, lr_bdim] = unwrapTensorAtLevel(lr, cur_level);
  // Optional tensors unwrap to (nullopt, nullopt) when absent.
  std::optional<Tensor> grad_scale_value;
  std::optional<int64_t> grad_scale_bdim;
  if (grad_scale) {
      std::tie(grad_scale_value, grad_scale_bdim) = unwrapTensorAtLevel(grad_scale.value(), cur_level);
  }
  std::optional<Tensor> found_inf_value;
  std::optional<int64_t> found_inf_bdim;
  if (found_inf) {
      std::tie(found_inf_value, found_inf_bdim) = unwrapTensorAtLevel(found_inf.value(), cur_level);
  }
  batch_rule(self, grads, exp_avgs, exp_avg_sqs, max_exp_avg_sqs, state_steps, lr_value, lr_bdim, beta1, beta2, weight_decay, eps, amsgrad, maximize, grad_scale_value, grad_scale_bdim, found_inf_value, found_inf_bdim);
}
// Generated vmap plumbing for in-place aten::_fused_sgd_ (no return value).
// Falls through to the plain op when none of the tensor lists or optional
// tensors are batched at the current functorch level; otherwise unwraps the
// optional grad_scale / found_inf tensors and forwards everything to
// batch_rule.
template <typename batch_rule_t, batch_rule_t batch_rule>
void _fused_sgd__generated_plumbing(at::TensorList self, at::TensorList grads, at::TensorList momentum_buffer_list, double weight_decay, double momentum, double lr, double dampening, bool nesterov, bool maximize, bool is_first_step, const ::std::optional<at::Tensor> & grad_scale, const ::std::optional<at::Tensor> & found_inf) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(grads, cur_level) && !isBatchedAtLevel(momentum_buffer_list, cur_level) && !isBatchedAtLevel(grad_scale, cur_level) && !isBatchedAtLevel(found_inf, cur_level)) {
    return at::_ops::_fused_sgd_::call(self, grads, momentum_buffer_list, weight_decay, momentum, lr, dampening, nesterov, maximize, is_first_step, grad_scale, found_inf);
  }
  // Optional tensors unwrap to (nullopt, nullopt) when absent.
  std::optional<Tensor> grad_scale_value;
  std::optional<int64_t> grad_scale_bdim;
  if (grad_scale) {
      std::tie(grad_scale_value, grad_scale_bdim) = unwrapTensorAtLevel(grad_scale.value(), cur_level);
  }
  std::optional<Tensor> found_inf_value;
  std::optional<int64_t> found_inf_bdim;
  if (found_inf) {
      std::tie(found_inf_value, found_inf_bdim) = unwrapTensorAtLevel(found_inf.value(), cur_level);
  }
  batch_rule(self, grads, momentum_buffer_list, weight_decay, momentum, lr, dampening, nesterov, maximize, is_first_step, grad_scale_value, grad_scale_bdim, found_inf_value, found_inf_bdim);
}
// vmap plumbing for aten::_fused_sgd_.tensor_lr (in-place, void return). Same shape as
// the scalar-lr overload except `lr` is a Tensor and is unconditionally unwrapped.
template <typename batch_rule_t, batch_rule_t batch_rule>
void _fused_sgd__tensor_lr_generated_plumbing(at::TensorList self, at::TensorList grads, at::TensorList momentum_buffer_list, double weight_decay, double momentum, const at::Tensor & lr, double dampening, bool nesterov, bool maximize, bool is_first_step, const ::std::optional<at::Tensor> & grad_scale, const ::std::optional<at::Tensor> & found_inf) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(grads, cur_level) && !isBatchedAtLevel(momentum_buffer_list, cur_level) && !isBatchedAtLevel(lr, cur_level) && !isBatchedAtLevel(grad_scale, cur_level) && !isBatchedAtLevel(found_inf, cur_level)) {
    // Nothing batched at this level: call the underlying op unchanged.
    return at::_ops::_fused_sgd__tensor_lr::call(self, grads, momentum_buffer_list, weight_decay, momentum, lr, dampening, nesterov, maximize, is_first_step, grad_scale, found_inf);
  }
  // Required tensor: unwrap to (value, batch-dim) at this level.
  auto [lr_value, lr_bdim] = unwrapTensorAtLevel(lr, cur_level);
  // Optional tensors unwrap only when present.
  std::optional<Tensor> grad_scale_value;
  std::optional<int64_t> grad_scale_bdim;
  if (grad_scale) {
      std::tie(grad_scale_value, grad_scale_bdim) = unwrapTensorAtLevel(grad_scale.value(), cur_level);
  }
  std::optional<Tensor> found_inf_value;
  std::optional<int64_t> found_inf_bdim;
  if (found_inf) {
      std::tie(found_inf_value, found_inf_bdim) = unwrapTensorAtLevel(found_inf.value(), cur_level);
  }
  batch_rule(self, grads, momentum_buffer_list, weight_decay, momentum, lr_value, lr_bdim, dampening, nesterov, maximize, is_first_step, grad_scale_value, grad_scale_bdim, found_inf_value, found_inf_bdim);
}
// vmap plumbing for aten::_fused_adagrad_ (in-place foreach optimizer step, void return).
// Unwraps only the optional grad_scale/found_inf tensors before invoking batch_rule.
template <typename batch_rule_t, batch_rule_t batch_rule>
void _fused_adagrad__generated_plumbing(at::TensorList self, at::TensorList grads, at::TensorList state_sums, at::TensorList state_steps, double lr, double lr_decay, double weight_decay, double eps, bool maximize, const ::std::optional<at::Tensor> & grad_scale, const ::std::optional<at::Tensor> & found_inf) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(grads, cur_level) && !isBatchedAtLevel(state_sums, cur_level) && !isBatchedAtLevel(state_steps, cur_level) && !isBatchedAtLevel(grad_scale, cur_level) && !isBatchedAtLevel(found_inf, cur_level)) {
    // Nothing batched at this level: call the underlying op unchanged.
    return at::_ops::_fused_adagrad_::call(self, grads, state_sums, state_steps, lr, lr_decay, weight_decay, eps, maximize, grad_scale, found_inf);
  }
  std::optional<Tensor> grad_scale_value;
  std::optional<int64_t> grad_scale_bdim;
  if (grad_scale) {
      std::tie(grad_scale_value, grad_scale_bdim) = unwrapTensorAtLevel(grad_scale.value(), cur_level);
  }
  std::optional<Tensor> found_inf_value;
  std::optional<int64_t> found_inf_bdim;
  if (found_inf) {
      std::tie(found_inf_value, found_inf_bdim) = unwrapTensorAtLevel(found_inf.value(), cur_level);
  }
  batch_rule(self, grads, state_sums, state_steps, lr, lr_decay, weight_decay, eps, maximize, grad_scale_value, grad_scale_bdim, found_inf_value, found_inf_bdim);
}
// vmap plumbing for aten::_fused_adagrad_.tensor_lr (in-place, void return). Like the
// scalar-lr overload but `lr` is a Tensor, so it is unwrapped to (value, bdim).
template <typename batch_rule_t, batch_rule_t batch_rule>
void _fused_adagrad__tensor_lr_generated_plumbing(at::TensorList self, at::TensorList grads, at::TensorList state_sums, at::TensorList state_steps, const at::Tensor & lr, double lr_decay, double weight_decay, double eps, bool maximize, const ::std::optional<at::Tensor> & grad_scale, const ::std::optional<at::Tensor> & found_inf) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(grads, cur_level) && !isBatchedAtLevel(state_sums, cur_level) && !isBatchedAtLevel(state_steps, cur_level) && !isBatchedAtLevel(lr, cur_level) && !isBatchedAtLevel(grad_scale, cur_level) && !isBatchedAtLevel(found_inf, cur_level)) {
    // Nothing batched at this level: call the underlying op unchanged.
    return at::_ops::_fused_adagrad__tensor_lr::call(self, grads, state_sums, state_steps, lr, lr_decay, weight_decay, eps, maximize, grad_scale, found_inf);
  }
  auto [lr_value, lr_bdim] = unwrapTensorAtLevel(lr, cur_level);
  std::optional<Tensor> grad_scale_value;
  std::optional<int64_t> grad_scale_bdim;
  if (grad_scale) {
      std::tie(grad_scale_value, grad_scale_bdim) = unwrapTensorAtLevel(grad_scale.value(), cur_level);
  }
  std::optional<Tensor> found_inf_value;
  std::optional<int64_t> found_inf_bdim;
  if (found_inf) {
      std::tie(found_inf_value, found_inf_bdim) = unwrapTensorAtLevel(found_inf.value(), cur_level);
  }
  batch_rule(self, grads, state_sums, state_steps, lr_value, lr_bdim, lr_decay, weight_decay, eps, maximize, grad_scale_value, grad_scale_bdim, found_inf_value, found_inf_bdim);
}
// vmap plumbing for aten::_propagate_xla_data (void return). Unwraps both tensors at
// the current level and hands the values plus their batch dims to batch_rule.
template <typename batch_rule_t, batch_rule_t batch_rule>
void _propagate_xla_data_generated_plumbing(const at::Tensor & input, const at::Tensor & output) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(output, cur_level)) {
    // Nothing batched at this level: call the underlying op unchanged.
    return at::_ops::_propagate_xla_data::call(input, output);
  }
  auto [input_value, input_bdim] = unwrapTensorAtLevel(input, cur_level);
  auto [output_value, output_bdim] = unwrapTensorAtLevel(output, cur_level);
  batch_rule(input_value, input_bdim, output_value, output_bdim);
}
// vmap plumbing for aten::_cudnn_rnn_backward.out (void return, writes into out0..out3).
// Required tensors (input, weight_buf, hx, output, reserve, out0..out2) are unwrapped
// unconditionally; the many optional tensors are unwrapped only when present. The
// TensorList arguments `weight` and `out3` are forwarded still-wrapped to batch_rule.
template <typename batch_rule_t, batch_rule_t batch_rule>
void _cudnn_rnn_backward_out_generated_plumbing(const at::Tensor & input, at::TensorList weight, int64_t weight_stride0, const at::Tensor & weight_buf, const at::Tensor & hx, const ::std::optional<at::Tensor> & cx, const at::Tensor & output, const ::std::optional<at::Tensor> & grad_output, const ::std::optional<at::Tensor> & grad_hy, const ::std::optional<at::Tensor> & grad_cy, int64_t mode, c10::SymInt hidden_size, c10::SymInt proj_size, int64_t num_layers, bool batch_first, double dropout, bool train, bool bidirectional, c10::SymIntArrayRef batch_sizes, const ::std::optional<at::Tensor> & dropout_state, const at::Tensor & reserve, ::std::array<bool,4> output_mask, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, at::TensorList out3) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(weight_buf, cur_level) && !isBatchedAtLevel(hx, cur_level) && !isBatchedAtLevel(cx, cur_level) && !isBatchedAtLevel(output, cur_level) && !isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(grad_hy, cur_level) && !isBatchedAtLevel(grad_cy, cur_level) && !isBatchedAtLevel(dropout_state, cur_level) && !isBatchedAtLevel(reserve, cur_level) && !isBatchedAtLevel(out0, cur_level) && !isBatchedAtLevel(out1, cur_level) && !isBatchedAtLevel(out2, cur_level) && !isBatchedAtLevel(out3, cur_level)) {
    // Nothing batched at this level: call the underlying op unchanged.
    return at::_ops::_cudnn_rnn_backward_out::call(input, weight, weight_stride0, weight_buf, hx, cx, output, grad_output, grad_hy, grad_cy, mode, hidden_size, proj_size, num_layers, batch_first, dropout, train, bidirectional, batch_sizes, dropout_state, reserve, output_mask, out0, out1, out2, out3);
  }
  auto [input_value, input_bdim] = unwrapTensorAtLevel(input, cur_level);
  auto [weight_buf_value, weight_buf_bdim] = unwrapTensorAtLevel(weight_buf, cur_level);
  auto [hx_value, hx_bdim] = unwrapTensorAtLevel(hx, cur_level);
  auto [output_value, output_bdim] = unwrapTensorAtLevel(output, cur_level);
  auto [reserve_value, reserve_bdim] = unwrapTensorAtLevel(reserve, cur_level);
  auto [out0_value, out0_bdim] = unwrapTensorAtLevel(out0, cur_level);
  auto [out1_value, out1_bdim] = unwrapTensorAtLevel(out1, cur_level);
  auto [out2_value, out2_bdim] = unwrapTensorAtLevel(out2, cur_level);
  // Optional tensors: (value, bdim) stay nullopt when the argument is absent.
  std::optional<Tensor> cx_value;
  std::optional<int64_t> cx_bdim;
  if (cx) {
      std::tie(cx_value, cx_bdim) = unwrapTensorAtLevel(cx.value(), cur_level);
  }
  std::optional<Tensor> grad_output_value;
  std::optional<int64_t> grad_output_bdim;
  if (grad_output) {
      std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output.value(), cur_level);
  }
  std::optional<Tensor> grad_hy_value;
  std::optional<int64_t> grad_hy_bdim;
  if (grad_hy) {
      std::tie(grad_hy_value, grad_hy_bdim) = unwrapTensorAtLevel(grad_hy.value(), cur_level);
  }
  std::optional<Tensor> grad_cy_value;
  std::optional<int64_t> grad_cy_bdim;
  if (grad_cy) {
      std::tie(grad_cy_value, grad_cy_bdim) = unwrapTensorAtLevel(grad_cy.value(), cur_level);
  }
  std::optional<Tensor> dropout_state_value;
  std::optional<int64_t> dropout_state_bdim;
  if (dropout_state) {
      std::tie(dropout_state_value, dropout_state_bdim) = unwrapTensorAtLevel(dropout_state.value(), cur_level);
  }
  batch_rule(input_value, input_bdim, weight, weight_stride0, weight_buf_value, weight_buf_bdim, hx_value, hx_bdim, cx_value, cx_bdim, output_value, output_bdim, grad_output_value, grad_output_bdim, grad_hy_value, grad_hy_bdim, grad_cy_value, grad_cy_bdim, mode, hidden_size, proj_size, num_layers, batch_first, dropout, train, bidirectional, batch_sizes, dropout_state_value, dropout_state_bdim, reserve_value, reserve_bdim, output_mask, out0_value, out0_bdim, out1_value, out1_bdim, out2_value, out2_bdim, out3);
}
// vmap plumbing for aten::bernoulli_.Tensor. Unwraps self and p, runs the batch rule,
// and re-wraps the (value, bdim) result pair into a BatchedTensor at this level.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor bernoulli_Tensor_generated_plumbing(const at::Tensor & self, const at::Tensor & p, ::std::optional<at::Generator> generator) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(p, cur_level)) {
    // Nothing batched at this level: call the underlying op unchanged.
    return at::_ops::bernoulli_Tensor::call(self, p, generator);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto [p_value, p_bdim] = unwrapTensorAtLevel(p, cur_level);
  auto results = batch_rule(self_value, self_bdim, p_value, p_bdim, generator);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// vmap plumbing for aten::embedding_renorm. Unwraps self and indices, applies the
// batch rule, and re-wraps the resulting (value, bdim) pair at the current level.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor embedding_renorm_generated_plumbing(const at::Tensor & self, const at::Tensor & indices, double max_norm, double norm_type) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(indices, cur_level)) {
    // Nothing batched at this level: call the underlying op unchanged.
    return at::_ops::embedding_renorm::call(self, indices, max_norm, norm_type);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto [indices_value, indices_bdim] = unwrapTensorAtLevel(indices, cur_level);
  auto results = batch_rule(self_value, self_bdim, indices_value, indices_bdim, max_norm, norm_type);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// vmap plumbing for aten::resize. Single-tensor pattern: unwrap self, run the batch
// rule with the non-tensor args, re-wrap the result at the current level.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor resize_generated_plumbing(const at::Tensor & self, c10::SymIntArrayRef size, ::std::optional<at::MemoryFormat> memory_format) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    // Nothing batched at this level: call the underlying op unchanged.
    return at::_ops::resize::call(self, size, memory_format);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, size, memory_format);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// vmap plumbing for aten::_resize_output. Single-tensor pattern: unwrap self, call the
// batch rule, and re-wrap the (value, bdim) result at the current level.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor _resize_output_generated_plumbing(const at::Tensor & self, c10::SymIntArrayRef size, at::Device device) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    // Nothing batched at this level: call the underlying op unchanged.
    return at::_ops::_resize_output::call(self, size, device);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, size, device);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// vmap plumbing for aten::_index_put_impl_. Unwraps self and values; the optional-tensor
// list `indices` is forwarded still-wrapped for the batch rule to handle.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor _index_put_impl_generated_plumbing(const at::Tensor & self, const c10::List<::std::optional<at::Tensor>> & indices, const at::Tensor & values, bool accumulate, bool unsafe) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(indices, cur_level) && !isBatchedAtLevel(values, cur_level)) {
    // Nothing batched at this level: call the underlying op unchanged.
    return at::_ops::_index_put_impl::call(self, indices, values, accumulate, unsafe);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto [values_value, values_bdim] = unwrapTensorAtLevel(values, cur_level);
  auto results = batch_rule(self_value, self_bdim, indices, values_value, values_bdim, accumulate, unsafe);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// vmap plumbing for aten::miopen_rnn_backward.out (void return, writes into out0..out3).
// Mirrors _cudnn_rnn_backward_out above but with plain int64_t/IntArrayRef sizes:
// required tensors are unwrapped unconditionally, optionals only when present, and the
// TensorList arguments `weight` and `out3` are forwarded still-wrapped.
template <typename batch_rule_t, batch_rule_t batch_rule>
void miopen_rnn_backward_out_generated_plumbing(const at::Tensor & input, at::TensorList weight, int64_t weight_stride0, const at::Tensor & weight_buf, const at::Tensor & hx, const ::std::optional<at::Tensor> & cx, const at::Tensor & output, const ::std::optional<at::Tensor> & grad_output, const ::std::optional<at::Tensor> & grad_hy, const ::std::optional<at::Tensor> & grad_cy, int64_t mode, int64_t hidden_size, int64_t num_layers, bool batch_first, double dropout, bool train, bool bidirectional, at::IntArrayRef batch_sizes, const ::std::optional<at::Tensor> & dropout_state, const at::Tensor & reserve, ::std::array<bool,4> output_mask, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, at::TensorList out3) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(weight_buf, cur_level) && !isBatchedAtLevel(hx, cur_level) && !isBatchedAtLevel(cx, cur_level) && !isBatchedAtLevel(output, cur_level) && !isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(grad_hy, cur_level) && !isBatchedAtLevel(grad_cy, cur_level) && !isBatchedAtLevel(dropout_state, cur_level) && !isBatchedAtLevel(reserve, cur_level) && !isBatchedAtLevel(out0, cur_level) && !isBatchedAtLevel(out1, cur_level) && !isBatchedAtLevel(out2, cur_level) && !isBatchedAtLevel(out3, cur_level)) {
    // Nothing batched at this level: call the underlying op unchanged.
    return at::_ops::miopen_rnn_backward_out::call(input, weight, weight_stride0, weight_buf, hx, cx, output, grad_output, grad_hy, grad_cy, mode, hidden_size, num_layers, batch_first, dropout, train, bidirectional, batch_sizes, dropout_state, reserve, output_mask, out0, out1, out2, out3);
  }
  auto [input_value, input_bdim] = unwrapTensorAtLevel(input, cur_level);
  auto [weight_buf_value, weight_buf_bdim] = unwrapTensorAtLevel(weight_buf, cur_level);
  auto [hx_value, hx_bdim] = unwrapTensorAtLevel(hx, cur_level);
  auto [output_value, output_bdim] = unwrapTensorAtLevel(output, cur_level);
  auto [reserve_value, reserve_bdim] = unwrapTensorAtLevel(reserve, cur_level);
  auto [out0_value, out0_bdim] = unwrapTensorAtLevel(out0, cur_level);
  auto [out1_value, out1_bdim] = unwrapTensorAtLevel(out1, cur_level);
  auto [out2_value, out2_bdim] = unwrapTensorAtLevel(out2, cur_level);
  // Optional tensors: (value, bdim) stay nullopt when the argument is absent.
  std::optional<Tensor> cx_value;
  std::optional<int64_t> cx_bdim;
  if (cx) {
      std::tie(cx_value, cx_bdim) = unwrapTensorAtLevel(cx.value(), cur_level);
  }
  std::optional<Tensor> grad_output_value;
  std::optional<int64_t> grad_output_bdim;
  if (grad_output) {
      std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output.value(), cur_level);
  }
  std::optional<Tensor> grad_hy_value;
  std::optional<int64_t> grad_hy_bdim;
  if (grad_hy) {
      std::tie(grad_hy_value, grad_hy_bdim) = unwrapTensorAtLevel(grad_hy.value(), cur_level);
  }
  std::optional<Tensor> grad_cy_value;
  std::optional<int64_t> grad_cy_bdim;
  if (grad_cy) {
      std::tie(grad_cy_value, grad_cy_bdim) = unwrapTensorAtLevel(grad_cy.value(), cur_level);
  }
  std::optional<Tensor> dropout_state_value;
  std::optional<int64_t> dropout_state_bdim;
  if (dropout_state) {
      std::tie(dropout_state_value, dropout_state_bdim) = unwrapTensorAtLevel(dropout_state.value(), cur_level);
  }
  batch_rule(input_value, input_bdim, weight, weight_stride0, weight_buf_value, weight_buf_bdim, hx_value, hx_bdim, cx_value, cx_bdim, output_value, output_bdim, grad_output_value, grad_output_bdim, grad_hy_value, grad_hy_bdim, grad_cy_value, grad_cy_bdim, mode, hidden_size, num_layers, batch_first, dropout, train, bidirectional, batch_sizes, dropout_state_value, dropout_state_bdim, reserve_value, reserve_bdim, output_mask, out0_value, out0_bdim, out1_value, out1_bdim, out2_value, out2_bdim, out3);
}
// vmap plumbing for aten::_native_batch_norm_legit_functional (5-tensor return).
// Unwraps required input/running stats and optional weight/bias, then re-wraps the
// five (value, bdim) pairs from the batch rule into a tuple of BatchedTensors.
template <typename batch_rule_t, batch_rule_t batch_rule>
::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor,at::Tensor> _native_batch_norm_legit_functional_generated_plumbing(const at::Tensor & input, const ::std::optional<at::Tensor> & weight, const ::std::optional<at::Tensor> & bias, const at::Tensor & running_mean, const at::Tensor & running_var, bool training, double momentum, double eps) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(bias, cur_level) && !isBatchedAtLevel(running_mean, cur_level) && !isBatchedAtLevel(running_var, cur_level)) {
    // Nothing batched at this level: call the underlying op unchanged.
    return at::_ops::_native_batch_norm_legit_functional::call(input, weight, bias, running_mean, running_var, training, momentum, eps);
  }
  auto [input_value, input_bdim] = unwrapTensorAtLevel(input, cur_level);
  auto [running_mean_value, running_mean_bdim] = unwrapTensorAtLevel(running_mean, cur_level);
  auto [running_var_value, running_var_bdim] = unwrapTensorAtLevel(running_var, cur_level);
  std::optional<Tensor> weight_value;
  std::optional<int64_t> weight_bdim;
  if (weight) {
      std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight.value(), cur_level);
  }
  std::optional<Tensor> bias_value;
  std::optional<int64_t> bias_bdim;
  if (bias) {
      std::tie(bias_value, bias_bdim) = unwrapTensorAtLevel(bias.value(), cur_level);
  }
  auto results = batch_rule(input_value, input_bdim, weight_value, weight_bdim, bias_value, bias_bdim, running_mean_value, running_mean_bdim, running_var_value, running_var_bdim, training, momentum, eps);
  // results interleaves (tensor, bdim) pairs: elements 0/2/4/6/8 are values, 1/3/5/7/9 bdims.
  return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level), makeBatched(std::get<6>(results), std::get<7>(results), cur_level), makeBatched(std::get<8>(results), std::get<9>(results), cur_level));
}
// vmap plumbing for aten::unsafe_split.Tensor_out (void return). Unwraps self; the
// output TensorList `out` is forwarded still-wrapped for the batch rule to handle.
template <typename batch_rule_t, batch_rule_t batch_rule>
void unsafe_split_Tensor_out_generated_plumbing(const at::Tensor & self, c10::SymInt split_size, int64_t dim, at::TensorList out) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(out, cur_level)) {
    // Nothing batched at this level: call the underlying op unchanged.
    return at::_ops::unsafe_split_Tensor_out::call(self, split_size, dim, out);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  batch_rule(self_value, self_bdim, split_size, dim, out);
}
// vmap plumbing for aten::unsafe_split_with_sizes.out (void return). Same shape as
// unsafe_split_Tensor_out but takes explicit per-chunk sizes.
template <typename batch_rule_t, batch_rule_t batch_rule>
void unsafe_split_with_sizes_out_generated_plumbing(const at::Tensor & self, c10::SymIntArrayRef split_sizes, int64_t dim, at::TensorList out) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(out, cur_level)) {
    // Nothing batched at this level: call the underlying op unchanged.
    return at::_ops::unsafe_split_with_sizes_out::call(self, split_sizes, dim, out);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  batch_rule(self_value, self_bdim, split_sizes, dim, out);
}
// vmap plumbing for aten::_batch_norm_with_update_functional (6-tensor return).
// Same unwrap pattern as _native_batch_norm_legit_functional, re-wrapping six
// (value, bdim) pairs from the batch rule.
template <typename batch_rule_t, batch_rule_t batch_rule>
::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor,at::Tensor,at::Tensor> _batch_norm_with_update_functional_generated_plumbing(const at::Tensor & input, const ::std::optional<at::Tensor> & weight, const ::std::optional<at::Tensor> & bias, const at::Tensor & running_mean, const at::Tensor & running_var, double momentum, double eps) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(bias, cur_level) && !isBatchedAtLevel(running_mean, cur_level) && !isBatchedAtLevel(running_var, cur_level)) {
    // Nothing batched at this level: call the underlying op unchanged.
    return at::_ops::_batch_norm_with_update_functional::call(input, weight, bias, running_mean, running_var, momentum, eps);
  }
  auto [input_value, input_bdim] = unwrapTensorAtLevel(input, cur_level);
  auto [running_mean_value, running_mean_bdim] = unwrapTensorAtLevel(running_mean, cur_level);
  auto [running_var_value, running_var_bdim] = unwrapTensorAtLevel(running_var, cur_level);
  std::optional<Tensor> weight_value;
  std::optional<int64_t> weight_bdim;
  if (weight) {
      std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight.value(), cur_level);
  }
  std::optional<Tensor> bias_value;
  std::optional<int64_t> bias_bdim;
  if (bias) {
      std::tie(bias_value, bias_bdim) = unwrapTensorAtLevel(bias.value(), cur_level);
  }
  auto results = batch_rule(input_value, input_bdim, weight_value, weight_bdim, bias_value, bias_bdim, running_mean_value, running_mean_bdim, running_var_value, running_var_bdim, momentum, eps);
  // results interleaves (tensor, bdim) pairs: even indices are values, odd are bdims.
  return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level), makeBatched(std::get<6>(results), std::get<7>(results), cur_level), makeBatched(std::get<8>(results), std::get<9>(results), cur_level), makeBatched(std::get<10>(results), std::get<11>(results), cur_level));
}
// vmap plumbing for aten::resize_as. Two-tensor pattern: unwrap self and the_template,
// run the batch rule, and re-wrap the single result at the current level.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor resize_as_generated_plumbing(const at::Tensor & self, const at::Tensor & the_template, ::std::optional<at::MemoryFormat> memory_format) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(the_template, cur_level)) {
    // Nothing batched at this level: call the underlying op unchanged.
    return at::_ops::resize_as::call(self, the_template, memory_format);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto [the_template_value, the_template_bdim] = unwrapTensorAtLevel(the_template, cur_level);
  auto results = batch_rule(self_value, self_bdim, the_template_value, the_template_bdim, memory_format);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// vmap plumbing for aten::resize_as_sparse. Two-tensor pattern identical to resize_as
// minus the memory_format argument.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor resize_as_sparse_generated_plumbing(const at::Tensor & self, const at::Tensor & the_template) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(the_template, cur_level)) {
    // Nothing batched at this level: call the underlying op unchanged.
    return at::_ops::resize_as_sparse::call(self, the_template);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto [the_template_value, the_template_bdim] = unwrapTensorAtLevel(the_template, cur_level);
  auto results = batch_rule(self_value, self_bdim, the_template_value, the_template_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// vmap plumbing for aten::zero_. Minimal single-tensor pattern: unwrap, apply batch
// rule, re-wrap.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor zero_generated_plumbing(const at::Tensor & self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    // Nothing batched at this level: call the underlying op unchanged.
    return at::_ops::zero::call(self);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// vmap plumbing for aten::sparse_resize. Single-tensor pattern with pass-through
// size/sparse_dim/dense_dim arguments.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor sparse_resize_generated_plumbing(const at::Tensor & self, at::IntArrayRef size, int64_t sparse_dim, int64_t dense_dim) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    // Nothing batched at this level: call the underlying op unchanged.
    return at::_ops::sparse_resize::call(self, size, sparse_dim, dense_dim);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, size, sparse_dim, dense_dim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// vmap plumbing for aten::sparse_resize_and_clear. Same single-tensor pattern as
// sparse_resize, dispatching to the _and_clear variant.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor sparse_resize_and_clear_generated_plumbing(const at::Tensor & self, at::IntArrayRef size, int64_t sparse_dim, int64_t dense_dim) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    // Nothing batched at this level: call the underlying op unchanged.
    return at::_ops::sparse_resize_and_clear::call(self, size, sparse_dim, dense_dim);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, size, sparse_dim, dense_dim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
// vmap plumbing for aten::_coalesced_. Single-tensor pattern with a pass-through
// `coalesced` flag.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor _coalesced_generated_plumbing(const at::Tensor & self, bool coalesced) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    // Nothing batched at this level: call the underlying op unchanged.
    return at::_ops::_coalesced::call(self, coalesced);
  }
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, coalesced);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
26532 template <typename batch_rule_t, batch_rule_t batch_rule>
26533 at::Tensor copy_sparse_to_sparse_generated_plumbing(const at::Tensor & self, const at::Tensor & src, bool non_blocking) {
26534   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
26535   auto maybe_layer = maybeCurrentDynamicLayer();
26536   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
26537   int64_t cur_level = maybe_layer->layerId();
26538   if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(src, cur_level)) {
26539     return at::_ops::copy_sparse_to_sparse::call(self, src, non_blocking);
26540   }
26541   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
26542   auto [src_value, src_bdim] = unwrapTensorAtLevel(src, cur_level);
26543   auto results = batch_rule(self_value, self_bdim, src_value, src_bdim, non_blocking);
26544   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
26545 }
26546 template <typename batch_rule_t, batch_rule_t batch_rule>
26547 void quantize_per_tensor_tensors_out_generated_plumbing(at::TensorList tensors, const at::Tensor & scales, const at::Tensor & zero_points, at::ScalarType dtype, at::TensorList out) {
26548   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
26549   auto maybe_layer = maybeCurrentDynamicLayer();
26550   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
26551   int64_t cur_level = maybe_layer->layerId();
26552   if (!isBatchedAtLevel(tensors, cur_level) && !isBatchedAtLevel(scales, cur_level) && !isBatchedAtLevel(zero_points, cur_level) && !isBatchedAtLevel(out, cur_level)) {
26553     return at::_ops::quantize_per_tensor_tensors_out::call(tensors, scales, zero_points, dtype, out);
26554   }
26555   auto [scales_value, scales_bdim] = unwrapTensorAtLevel(scales, cur_level);
26556   auto [zero_points_value, zero_points_bdim] = unwrapTensorAtLevel(zero_points, cur_level);
26557   batch_rule(tensors, scales_value, scales_bdim, zero_points_value, zero_points_bdim, dtype, out);
26558 }
26559 template <typename batch_rule_t, batch_rule_t batch_rule>
26560 void dequantize_tensors_out_generated_plumbing(at::TensorList tensors, at::TensorList out) {
26561   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
26562   auto maybe_layer = maybeCurrentDynamicLayer();
26563   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
26564   int64_t cur_level = maybe_layer->layerId();
26565   if (!isBatchedAtLevel(tensors, cur_level) && !isBatchedAtLevel(out, cur_level)) {
26566     return at::_ops::dequantize_tensors_out::call(tensors, out);
26567   }
26568 
26569   batch_rule(tensors, out);
26570 }
// Generated vmap plumbing for aten::_fused_moving_avg_obs_fq_helper_functional.
// Unwraps each of the seven tensor inputs at the current vmap level, invokes
// the batch rule, and re-wraps the six returned (tensor, bdim) pairs — the
// batch rule's result tuple interleaves values and batch dims positionally.
template <typename batch_rule_t, batch_rule_t batch_rule>
::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor,at::Tensor,at::Tensor> _fused_moving_avg_obs_fq_helper_functional_generated_plumbing(const at::Tensor & self, const at::Tensor & observer_on, const at::Tensor & fake_quant_on, const at::Tensor & running_min, const at::Tensor & running_max, const at::Tensor & scale, const at::Tensor & zero_point, double averaging_const, int64_t quant_min, int64_t quant_max, int64_t ch_axis, bool per_row_fake_quant, bool symmetric_quant) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  // Fast path: if no tensor argument is batched at this level, call the op directly.
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(observer_on, cur_level) && !isBatchedAtLevel(fake_quant_on, cur_level) && !isBatchedAtLevel(running_min, cur_level) && !isBatchedAtLevel(running_max, cur_level) && !isBatchedAtLevel(scale, cur_level) && !isBatchedAtLevel(zero_point, cur_level)) {
    return at::_ops::_fused_moving_avg_obs_fq_helper_functional::call(self, observer_on, fake_quant_on, running_min, running_max, scale, zero_point, averaging_const, quant_min, quant_max, ch_axis, per_row_fake_quant, symmetric_quant);
  }
  // Unwrap each tensor into (value, optional batch dim) at the current level.
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  auto [observer_on_value, observer_on_bdim] = unwrapTensorAtLevel(observer_on, cur_level);
  auto [fake_quant_on_value, fake_quant_on_bdim] = unwrapTensorAtLevel(fake_quant_on, cur_level);
  auto [running_min_value, running_min_bdim] = unwrapTensorAtLevel(running_min, cur_level);
  auto [running_max_value, running_max_bdim] = unwrapTensorAtLevel(running_max, cur_level);
  auto [scale_value, scale_bdim] = unwrapTensorAtLevel(scale, cur_level);
  auto [zero_point_value, zero_point_bdim] = unwrapTensorAtLevel(zero_point, cur_level);
  auto results = batch_rule(self_value, self_bdim, observer_on_value, observer_on_bdim, fake_quant_on_value, fake_quant_on_bdim, running_min_value, running_min_bdim, running_max_value, running_max_bdim, scale_value, scale_bdim, zero_point_value, zero_point_bdim, averaging_const, quant_min, quant_max, ch_axis, per_row_fake_quant, symmetric_quant);
  // Re-wrap: results is (val0, bdim0, val1, bdim1, ..., val5, bdim5).
  return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level), makeBatched(std::get<6>(results), std::get<7>(results), cur_level), makeBatched(std::get<8>(results), std::get<9>(results), cur_level), makeBatched(std::get<10>(results), std::get<11>(results), cur_level));
}
// Generated vmap plumbing for aten::lstm_mps_backward_out (no return value).
// Unwraps the required tensors, unwraps each optional gradient tensor only when
// it is present, and forwards everything (tensor lists untouched) to the batch
// rule in the exact order the rule expects.
template <typename batch_rule_t, batch_rule_t batch_rule>
void lstm_mps_backward_out_generated_plumbing(const ::std::optional<at::Tensor> & grad_y, const ::std::optional<at::Tensor> & grad_hy, const ::std::optional<at::Tensor> & grad_cy, const at::Tensor & z_state, const at::Tensor & cell_state_fwd, const at::Tensor & input, const at::Tensor & layersOutputs, at::TensorList hx, at::TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional, bool batch_first, at::Tensor & out0, at::TensorList out1, at::TensorList out2) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
  int64_t cur_level = maybe_layer->layerId();
  // Fast path: if no argument is batched at this level, call the op directly.
  if (!isBatchedAtLevel(grad_y, cur_level) && !isBatchedAtLevel(grad_hy, cur_level) && !isBatchedAtLevel(grad_cy, cur_level) && !isBatchedAtLevel(z_state, cur_level) && !isBatchedAtLevel(cell_state_fwd, cur_level) && !isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(layersOutputs, cur_level) && !isBatchedAtLevel(hx, cur_level) && !isBatchedAtLevel(params, cur_level) && !isBatchedAtLevel(out0, cur_level) && !isBatchedAtLevel(out1, cur_level) && !isBatchedAtLevel(out2, cur_level)) {
    return at::_ops::lstm_mps_backward_out::call(grad_y, grad_hy, grad_cy, z_state, cell_state_fwd, input, layersOutputs, hx, params, has_biases, num_layers, dropout, train, bidirectional, batch_first, out0, out1, out2);
  }
  // Unwrap the required tensors into (value, optional batch dim) pairs.
  auto [z_state_value, z_state_bdim] = unwrapTensorAtLevel(z_state, cur_level);
  auto [cell_state_fwd_value, cell_state_fwd_bdim] = unwrapTensorAtLevel(cell_state_fwd, cur_level);
  auto [input_value, input_bdim] = unwrapTensorAtLevel(input, cur_level);
  auto [layersOutputs_value, layersOutputs_bdim] = unwrapTensorAtLevel(layersOutputs, cur_level);
  auto [out0_value, out0_bdim] = unwrapTensorAtLevel(out0, cur_level);
  // Optional gradients: left as empty optionals when the argument is absent.
  std::optional<Tensor> grad_y_value;
  std::optional<int64_t> grad_y_bdim;
  if (grad_y) {
      std::tie(grad_y_value, grad_y_bdim) = unwrapTensorAtLevel(grad_y.value(), cur_level);
  }
  std::optional<Tensor> grad_hy_value;
  std::optional<int64_t> grad_hy_bdim;
  if (grad_hy) {
      std::tie(grad_hy_value, grad_hy_bdim) = unwrapTensorAtLevel(grad_hy.value(), cur_level);
  }
  std::optional<Tensor> grad_cy_value;
  std::optional<int64_t> grad_cy_bdim;
  if (grad_cy) {
      std::tie(grad_cy_value, grad_cy_bdim) = unwrapTensorAtLevel(grad_cy.value(), cur_level);
  }
  // Tensor lists (hx, params, out1, out2) are forwarded unchanged.
  batch_rule(grad_y_value, grad_y_bdim, grad_hy_value, grad_hy_bdim, grad_cy_value, grad_cy_bdim, z_state_value, z_state_bdim, cell_state_fwd_value, cell_state_fwd_bdim, input_value, input_bdim, layersOutputs_value, layersOutputs_bdim, hx, params, has_biases, num_layers, dropout, train, bidirectional, batch_first, out0_value, out0_bdim, out1, out2);
}
26621 template <typename batch_rule_t, batch_rule_t batch_rule>
26622 at::Tensor set_source_Storage_generated_plumbing(const at::Tensor & self, at::Storage source) {
26623   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
26624   auto maybe_layer = maybeCurrentDynamicLayer();
26625   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
26626   int64_t cur_level = maybe_layer->layerId();
26627   if (!isBatchedAtLevel(self, cur_level)) {
26628     return at::_ops::set_source_Storage::call(self, source);
26629   }
26630   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
26631   auto results = batch_rule(self_value, self_bdim, source);
26632   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
26633 }
26634 template <typename batch_rule_t, batch_rule_t batch_rule>
26635 at::Tensor set_source_Storage_storage_offset_generated_plumbing(const at::Tensor & self, at::Storage source, c10::SymInt storage_offset, c10::SymIntArrayRef size, c10::SymIntArrayRef stride) {
26636   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
26637   auto maybe_layer = maybeCurrentDynamicLayer();
26638   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
26639   int64_t cur_level = maybe_layer->layerId();
26640   if (!isBatchedAtLevel(self, cur_level)) {
26641     return at::_ops::set_source_Storage_storage_offset::call(self, source, storage_offset, size, stride);
26642   }
26643   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
26644   auto results = batch_rule(self_value, self_bdim, source, storage_offset, size, stride);
26645   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
26646 }
26647 template <typename batch_rule_t, batch_rule_t batch_rule>
26648 at::Tensor set_source_Tensor_generated_plumbing(const at::Tensor & self, const at::Tensor & source) {
26649   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
26650   auto maybe_layer = maybeCurrentDynamicLayer();
26651   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
26652   int64_t cur_level = maybe_layer->layerId();
26653   if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(source, cur_level)) {
26654     return at::_ops::set_source_Tensor::call(self, source);
26655   }
26656   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
26657   auto [source_value, source_bdim] = unwrapTensorAtLevel(source, cur_level);
26658   auto results = batch_rule(self_value, self_bdim, source_value, source_bdim);
26659   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
26660 }
26661 template <typename batch_rule_t, batch_rule_t batch_rule>
26662 at::Tensor set_generated_plumbing(const at::Tensor & self) {
26663   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
26664   auto maybe_layer = maybeCurrentDynamicLayer();
26665   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
26666   int64_t cur_level = maybe_layer->layerId();
26667   if (!isBatchedAtLevel(self, cur_level)) {
26668     return at::_ops::set::call(self);
26669   }
26670   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
26671   auto results = batch_rule(self_value, self_bdim);
26672   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
26673 }
26674 template <typename batch_rule_t, batch_rule_t batch_rule>
26675 at::Tensor random_from_generated_plumbing(const at::Tensor & self, int64_t from, ::std::optional<int64_t> to, ::std::optional<at::Generator> generator) {
26676   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
26677   auto maybe_layer = maybeCurrentDynamicLayer();
26678   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
26679   int64_t cur_level = maybe_layer->layerId();
26680   if (!isBatchedAtLevel(self, cur_level)) {
26681     return at::_ops::random_from::call(self, from, to, generator);
26682   }
26683   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
26684   auto results = batch_rule(self_value, self_bdim, from, to, generator);
26685   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
26686 }
26687 template <typename batch_rule_t, batch_rule_t batch_rule>
26688 at::Tensor random_to_generated_plumbing(const at::Tensor & self, int64_t to, ::std::optional<at::Generator> generator) {
26689   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
26690   auto maybe_layer = maybeCurrentDynamicLayer();
26691   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
26692   int64_t cur_level = maybe_layer->layerId();
26693   if (!isBatchedAtLevel(self, cur_level)) {
26694     return at::_ops::random_to::call(self, to, generator);
26695   }
26696   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
26697   auto results = batch_rule(self_value, self_bdim, to, generator);
26698   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
26699 }
26700 template <typename batch_rule_t, batch_rule_t batch_rule>
26701 at::Tensor random_generated_plumbing(const at::Tensor & self, ::std::optional<at::Generator> generator) {
26702   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
26703   auto maybe_layer = maybeCurrentDynamicLayer();
26704   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
26705   int64_t cur_level = maybe_layer->layerId();
26706   if (!isBatchedAtLevel(self, cur_level)) {
26707     return at::_ops::random::call(self, generator);
26708   }
26709   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
26710   auto results = batch_rule(self_value, self_bdim, generator);
26711   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
26712 }
26713 template <typename batch_rule_t, batch_rule_t batch_rule>
26714 at::Tensor uniform_generated_plumbing(const at::Tensor & self, double from, double to, ::std::optional<at::Generator> generator) {
26715   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
26716   auto maybe_layer = maybeCurrentDynamicLayer();
26717   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
26718   int64_t cur_level = maybe_layer->layerId();
26719   if (!isBatchedAtLevel(self, cur_level)) {
26720     return at::_ops::uniform::call(self, from, to, generator);
26721   }
26722   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
26723   auto results = batch_rule(self_value, self_bdim, from, to, generator);
26724   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
26725 }
26726 template <typename batch_rule_t, batch_rule_t batch_rule>
26727 at::Tensor cauchy_generated_plumbing(const at::Tensor & self, double median, double sigma, ::std::optional<at::Generator> generator) {
26728   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
26729   auto maybe_layer = maybeCurrentDynamicLayer();
26730   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
26731   int64_t cur_level = maybe_layer->layerId();
26732   if (!isBatchedAtLevel(self, cur_level)) {
26733     return at::_ops::cauchy::call(self, median, sigma, generator);
26734   }
26735   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
26736   auto results = batch_rule(self_value, self_bdim, median, sigma, generator);
26737   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
26738 }
26739 template <typename batch_rule_t, batch_rule_t batch_rule>
26740 at::Tensor log_normal_generated_plumbing(const at::Tensor & self, double mean, double std, ::std::optional<at::Generator> generator) {
26741   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
26742   auto maybe_layer = maybeCurrentDynamicLayer();
26743   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
26744   int64_t cur_level = maybe_layer->layerId();
26745   if (!isBatchedAtLevel(self, cur_level)) {
26746     return at::_ops::log_normal::call(self, mean, std, generator);
26747   }
26748   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
26749   auto results = batch_rule(self_value, self_bdim, mean, std, generator);
26750   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
26751 }
26752 template <typename batch_rule_t, batch_rule_t batch_rule>
26753 at::Tensor exponential_generated_plumbing(const at::Tensor & self, double lambd, ::std::optional<at::Generator> generator) {
26754   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
26755   auto maybe_layer = maybeCurrentDynamicLayer();
26756   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
26757   int64_t cur_level = maybe_layer->layerId();
26758   if (!isBatchedAtLevel(self, cur_level)) {
26759     return at::_ops::exponential::call(self, lambd, generator);
26760   }
26761   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
26762   auto results = batch_rule(self_value, self_bdim, lambd, generator);
26763   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
26764 }
26765 template <typename batch_rule_t, batch_rule_t batch_rule>
26766 at::Tensor geometric_generated_plumbing(const at::Tensor & self, double p, ::std::optional<at::Generator> generator) {
26767   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
26768   auto maybe_layer = maybeCurrentDynamicLayer();
26769   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
26770   int64_t cur_level = maybe_layer->layerId();
26771   if (!isBatchedAtLevel(self, cur_level)) {
26772     return at::_ops::geometric::call(self, p, generator);
26773   }
26774   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
26775   auto results = batch_rule(self_value, self_bdim, p, generator);
26776   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
26777 }
26778 template <typename batch_rule_t, batch_rule_t batch_rule>
26779 void _histogramdd_bin_edges_out_generated_plumbing(const at::Tensor & self, at::IntArrayRef bins, ::std::optional<at::ArrayRef<double>> range, const ::std::optional<at::Tensor> & weight, bool density, at::TensorList out) {
26780   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
26781   auto maybe_layer = maybeCurrentDynamicLayer();
26782   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
26783   int64_t cur_level = maybe_layer->layerId();
26784   if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(out, cur_level)) {
26785     return at::_ops::_histogramdd_bin_edges_out::call(self, bins, range, weight, density, out);
26786   }
26787   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
26788   std::optional<Tensor> weight_value;
26789   std::optional<int64_t> weight_bdim;
26790   if (weight) {
26791       std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight.value(), cur_level);
26792   }
26793   batch_rule(self_value, self_bdim, bins, range, weight_value, weight_bdim, density, out);
26794 }
26795 template <typename batch_rule_t, batch_rule_t batch_rule>
26796 void _amp_foreach_non_finite_check_and_unscale_out_generated_plumbing(at::TensorList self, at::Tensor & found_inf, const at::Tensor & inv_scale, at::TensorList out) {
26797   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
26798   auto maybe_layer = maybeCurrentDynamicLayer();
26799   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
26800   int64_t cur_level = maybe_layer->layerId();
26801   if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(found_inf, cur_level) && !isBatchedAtLevel(inv_scale, cur_level) && !isBatchedAtLevel(out, cur_level)) {
26802     return at::_ops::_amp_foreach_non_finite_check_and_unscale_out::call(self, found_inf, inv_scale, out);
26803   }
26804   auto [found_inf_value, found_inf_bdim] = unwrapTensorAtLevel(found_inf, cur_level);
26805   auto [inv_scale_value, inv_scale_bdim] = unwrapTensorAtLevel(inv_scale, cur_level);
26806   batch_rule(self, found_inf_value, found_inf_bdim, inv_scale_value, inv_scale_bdim, out);
26807 }
26808 template <typename batch_rule_t, batch_rule_t batch_rule>
26809 ::std::tuple<::std::vector<at::Tensor>,at::Tensor> _amp_foreach_non_finite_check_and_unscale_generated_plumbing(at::TensorList self, const at::Tensor & found_inf, const at::Tensor & inv_scale) {
26810   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
26811   auto maybe_layer = maybeCurrentDynamicLayer();
26812   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
26813   int64_t cur_level = maybe_layer->layerId();
26814   if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(found_inf, cur_level) && !isBatchedAtLevel(inv_scale, cur_level)) {
26815     return at::_ops::_amp_foreach_non_finite_check_and_unscale::call(self, found_inf, inv_scale);
26816   }
26817   auto [found_inf_value, found_inf_bdim] = unwrapTensorAtLevel(found_inf, cur_level);
26818   auto [inv_scale_value, inv_scale_bdim] = unwrapTensorAtLevel(inv_scale, cur_level);
26819   auto results = batch_rule(self, found_inf_value, found_inf_bdim, inv_scale_value, inv_scale_bdim);
26820   return std::make_tuple(makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
26821 }
26822 template <typename batch_rule_t, batch_rule_t batch_rule>
26823 ::std::tuple<at::Tensor,at::Tensor> _amp_update_scale_generated_plumbing(const at::Tensor & self, const at::Tensor & growth_tracker, const at::Tensor & found_inf, double scale_growth_factor, double scale_backoff_factor, int64_t growth_interval) {
26824   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
26825   auto maybe_layer = maybeCurrentDynamicLayer();
26826   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
26827   int64_t cur_level = maybe_layer->layerId();
26828   if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(growth_tracker, cur_level) && !isBatchedAtLevel(found_inf, cur_level)) {
26829     return at::_ops::_amp_update_scale::call(self, growth_tracker, found_inf, scale_growth_factor, scale_backoff_factor, growth_interval);
26830   }
26831   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
26832   auto [growth_tracker_value, growth_tracker_bdim] = unwrapTensorAtLevel(growth_tracker, cur_level);
26833   auto [found_inf_value, found_inf_bdim] = unwrapTensorAtLevel(found_inf, cur_level);
26834   auto results = batch_rule(self_value, self_bdim, growth_tracker_value, growth_tracker_bdim, found_inf_value, found_inf_bdim, scale_growth_factor, scale_backoff_factor, growth_interval);
26835   return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
26836 }
26837 template <typename batch_rule_t, batch_rule_t batch_rule>
26838 void _foreach_add_Scalar_out_generated_plumbing(at::TensorList self, const at::Scalar & scalar, at::TensorList out) {
26839   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
26840   auto maybe_layer = maybeCurrentDynamicLayer();
26841   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
26842   int64_t cur_level = maybe_layer->layerId();
26843   if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(out, cur_level)) {
26844     return at::_ops::_foreach_add_Scalar_out::call(self, scalar, out);
26845   }
26846 
26847   batch_rule(self, scalar, out);
26848 }
26849 template <typename batch_rule_t, batch_rule_t batch_rule>
26850 void _foreach_add_List_out_generated_plumbing(at::TensorList self, at::TensorList other, const at::Scalar & alpha, at::TensorList out) {
26851   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
26852   auto maybe_layer = maybeCurrentDynamicLayer();
26853   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
26854   int64_t cur_level = maybe_layer->layerId();
26855   if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level) && !isBatchedAtLevel(out, cur_level)) {
26856     return at::_ops::_foreach_add_List_out::call(self, other, alpha, out);
26857   }
26858 
26859   batch_rule(self, other, alpha, out);
26860 }
26861 template <typename batch_rule_t, batch_rule_t batch_rule>
26862 void _foreach_add_ScalarList_out_generated_plumbing(at::TensorList self, at::ArrayRef<at::Scalar> scalars, at::TensorList out) {
26863   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
26864   auto maybe_layer = maybeCurrentDynamicLayer();
26865   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
26866   int64_t cur_level = maybe_layer->layerId();
26867   if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(out, cur_level)) {
26868     return at::_ops::_foreach_add_ScalarList_out::call(self, scalars, out);
26869   }
26870 
26871   batch_rule(self, scalars, out);
26872 }
26873 template <typename batch_rule_t, batch_rule_t batch_rule>
26874 void _foreach_add_Tensor_out_generated_plumbing(at::TensorList self, const at::Tensor & other, const at::Scalar & alpha, at::TensorList out) {
26875   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
26876   auto maybe_layer = maybeCurrentDynamicLayer();
26877   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
26878   int64_t cur_level = maybe_layer->layerId();
26879   if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level) && !isBatchedAtLevel(out, cur_level)) {
26880     return at::_ops::_foreach_add_Tensor_out::call(self, other, alpha, out);
26881   }
26882   auto [other_value, other_bdim] = unwrapTensorAtLevel(other, cur_level);
26883   batch_rule(self, other_value, other_bdim, alpha, out);
26884 }
26885 template <typename batch_rule_t, batch_rule_t batch_rule>
26886 void _foreach_sub_Scalar_out_generated_plumbing(at::TensorList self, const at::Scalar & scalar, at::TensorList out) {
26887   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
26888   auto maybe_layer = maybeCurrentDynamicLayer();
26889   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
26890   int64_t cur_level = maybe_layer->layerId();
26891   if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(out, cur_level)) {
26892     return at::_ops::_foreach_sub_Scalar_out::call(self, scalar, out);
26893   }
26894 
26895   batch_rule(self, scalar, out);
26896 }
26897 template <typename batch_rule_t, batch_rule_t batch_rule>
26898 void _foreach_sub_List_out_generated_plumbing(at::TensorList self, at::TensorList other, const at::Scalar & alpha, at::TensorList out) {
26899   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
26900   auto maybe_layer = maybeCurrentDynamicLayer();
26901   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
26902   int64_t cur_level = maybe_layer->layerId();
26903   if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level) && !isBatchedAtLevel(out, cur_level)) {
26904     return at::_ops::_foreach_sub_List_out::call(self, other, alpha, out);
26905   }
26906 
26907   batch_rule(self, other, alpha, out);
26908 }
26909 template <typename batch_rule_t, batch_rule_t batch_rule>
26910 void _foreach_sub_ScalarList_out_generated_plumbing(at::TensorList self, at::ArrayRef<at::Scalar> scalars, at::TensorList out) {
26911   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
26912   auto maybe_layer = maybeCurrentDynamicLayer();
26913   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
26914   int64_t cur_level = maybe_layer->layerId();
26915   if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(out, cur_level)) {
26916     return at::_ops::_foreach_sub_ScalarList_out::call(self, scalars, out);
26917   }
26918 
26919   batch_rule(self, scalars, out);
26920 }
26921 template <typename batch_rule_t, batch_rule_t batch_rule>
26922 void _foreach_mul_Scalar_out_generated_plumbing(at::TensorList self, const at::Scalar & scalar, at::TensorList out) {
26923   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
26924   auto maybe_layer = maybeCurrentDynamicLayer();
26925   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
26926   int64_t cur_level = maybe_layer->layerId();
26927   if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(out, cur_level)) {
26928     return at::_ops::_foreach_mul_Scalar_out::call(self, scalar, out);
26929   }
26930 
26931   batch_rule(self, scalar, out);
26932 }
26933 template <typename batch_rule_t, batch_rule_t batch_rule>
26934 void _foreach_mul_List_out_generated_plumbing(at::TensorList self, at::TensorList other, at::TensorList out) {
26935   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
26936   auto maybe_layer = maybeCurrentDynamicLayer();
26937   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
26938   int64_t cur_level = maybe_layer->layerId();
26939   if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level) && !isBatchedAtLevel(out, cur_level)) {
26940     return at::_ops::_foreach_mul_List_out::call(self, other, out);
26941   }
26942 
26943   batch_rule(self, other, out);
26944 }
26945 template <typename batch_rule_t, batch_rule_t batch_rule>
26946 void _foreach_mul_ScalarList_out_generated_plumbing(at::TensorList self, at::ArrayRef<at::Scalar> scalars, at::TensorList out) {
26947   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
26948   auto maybe_layer = maybeCurrentDynamicLayer();
26949   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
26950   int64_t cur_level = maybe_layer->layerId();
26951   if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(out, cur_level)) {
26952     return at::_ops::_foreach_mul_ScalarList_out::call(self, scalars, out);
26953   }
26954 
26955   batch_rule(self, scalars, out);
26956 }
// Generated vmap plumbing for at::_ops::_foreach_mul_Tensor_out (out-variant,
// returns void). Falls back to the plain op when nothing is batched at the
// current functorch level; otherwise forwards to batch_rule.
template <typename batch_rule_t, batch_rule_t batch_rule>
void _foreach_mul_Tensor_out_generated_plumbing(at::TensorList self, const at::Tensor & other, at::TensorList out) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level) && !isBatchedAtLevel(out, cur_level)) {
    return at::_ops::_foreach_mul_Tensor_out::call(self, other, out);
  }
  // `other` is a plain Tensor, so split it into (value, batch-dim) for the rule.
  auto [other_value, other_bdim] = unwrapTensorAtLevel(other, cur_level);
  batch_rule(self, other_value, other_bdim, out);
}
// Generated vmap plumbing for at::_ops::_foreach_div_Scalar_out (out-variant,
// returns void). Falls back to the plain op when nothing is batched at the
// current functorch level; otherwise forwards to batch_rule.
template <typename batch_rule_t, batch_rule_t batch_rule>
void _foreach_div_Scalar_out_generated_plumbing(at::TensorList self, const at::Scalar & scalar, at::TensorList out) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(out, cur_level)) {
    return at::_ops::_foreach_div_Scalar_out::call(self, scalar, out);
  }

  batch_rule(self, scalar, out);
}
// Generated vmap plumbing for at::_ops::_foreach_div_List_out (out-variant,
// returns void). Falls back to the plain op when nothing is batched at the
// current functorch level; otherwise forwards to batch_rule.
template <typename batch_rule_t, batch_rule_t batch_rule>
void _foreach_div_List_out_generated_plumbing(at::TensorList self, at::TensorList other, at::TensorList out) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level) && !isBatchedAtLevel(out, cur_level)) {
    return at::_ops::_foreach_div_List_out::call(self, other, out);
  }

  batch_rule(self, other, out);
}
// Generated vmap plumbing for at::_ops::_foreach_div_ScalarList_out (out-variant,
// returns void). Falls back to the plain op when nothing is batched at the
// current functorch level; otherwise forwards to batch_rule.
template <typename batch_rule_t, batch_rule_t batch_rule>
void _foreach_div_ScalarList_out_generated_plumbing(at::TensorList self, at::ArrayRef<at::Scalar> scalars, at::TensorList out) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(out, cur_level)) {
    return at::_ops::_foreach_div_ScalarList_out::call(self, scalars, out);
  }

  batch_rule(self, scalars, out);
}
// Generated vmap plumbing for at::_ops::_foreach_div_Tensor_out (out-variant,
// returns void). Falls back to the plain op when nothing is batched at the
// current functorch level; otherwise forwards to batch_rule.
template <typename batch_rule_t, batch_rule_t batch_rule>
void _foreach_div_Tensor_out_generated_plumbing(at::TensorList self, const at::Tensor & other, at::TensorList out) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level) && !isBatchedAtLevel(out, cur_level)) {
    return at::_ops::_foreach_div_Tensor_out::call(self, other, out);
  }
  // `other` is a plain Tensor, so split it into (value, batch-dim) for the rule.
  auto [other_value, other_bdim] = unwrapTensorAtLevel(other, cur_level);
  batch_rule(self, other_value, other_bdim, out);
}
// Generated vmap plumbing for at::_ops::_foreach_clamp_max_Scalar_out (out-variant,
// returns void). Falls back to the plain op when nothing is batched at the
// current functorch level; otherwise forwards to batch_rule.
template <typename batch_rule_t, batch_rule_t batch_rule>
void _foreach_clamp_max_Scalar_out_generated_plumbing(at::TensorList self, const at::Scalar & scalar, at::TensorList out) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(out, cur_level)) {
    return at::_ops::_foreach_clamp_max_Scalar_out::call(self, scalar, out);
  }

  batch_rule(self, scalar, out);
}
// Generated vmap plumbing for at::_ops::_foreach_clamp_max_List_out (out-variant,
// returns void). Falls back to the plain op when nothing is batched at the
// current functorch level; otherwise forwards to batch_rule.
template <typename batch_rule_t, batch_rule_t batch_rule>
void _foreach_clamp_max_List_out_generated_plumbing(at::TensorList self, at::TensorList other, at::TensorList out) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level) && !isBatchedAtLevel(out, cur_level)) {
    return at::_ops::_foreach_clamp_max_List_out::call(self, other, out);
  }

  batch_rule(self, other, out);
}
// Generated vmap plumbing for at::_ops::_foreach_clamp_max_ScalarList_out (out-variant,
// returns void). Falls back to the plain op when nothing is batched at the
// current functorch level; otherwise forwards to batch_rule.
template <typename batch_rule_t, batch_rule_t batch_rule>
void _foreach_clamp_max_ScalarList_out_generated_plumbing(at::TensorList self, at::ArrayRef<at::Scalar> scalars, at::TensorList out) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(out, cur_level)) {
    return at::_ops::_foreach_clamp_max_ScalarList_out::call(self, scalars, out);
  }

  batch_rule(self, scalars, out);
}
// Generated vmap plumbing for at::_ops::_foreach_clamp_min_Scalar_out (out-variant,
// returns void). Falls back to the plain op when nothing is batched at the
// current functorch level; otherwise forwards to batch_rule.
template <typename batch_rule_t, batch_rule_t batch_rule>
void _foreach_clamp_min_Scalar_out_generated_plumbing(at::TensorList self, const at::Scalar & scalar, at::TensorList out) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(out, cur_level)) {
    return at::_ops::_foreach_clamp_min_Scalar_out::call(self, scalar, out);
  }

  batch_rule(self, scalar, out);
}
// Generated vmap plumbing for at::_ops::_foreach_clamp_min_List_out (out-variant,
// returns void). Falls back to the plain op when nothing is batched at the
// current functorch level; otherwise forwards to batch_rule.
template <typename batch_rule_t, batch_rule_t batch_rule>
void _foreach_clamp_min_List_out_generated_plumbing(at::TensorList self, at::TensorList other, at::TensorList out) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level) && !isBatchedAtLevel(out, cur_level)) {
    return at::_ops::_foreach_clamp_min_List_out::call(self, other, out);
  }

  batch_rule(self, other, out);
}
// Generated vmap plumbing for at::_ops::_foreach_clamp_min_ScalarList_out (out-variant,
// returns void). Falls back to the plain op when nothing is batched at the
// current functorch level; otherwise forwards to batch_rule.
template <typename batch_rule_t, batch_rule_t batch_rule>
void _foreach_clamp_min_ScalarList_out_generated_plumbing(at::TensorList self, at::ArrayRef<at::Scalar> scalars, at::TensorList out) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(out, cur_level)) {
    return at::_ops::_foreach_clamp_min_ScalarList_out::call(self, scalars, out);
  }

  batch_rule(self, scalars, out);
}
// Generated vmap plumbing for at::_ops::_foreach_maximum_Scalar_out (out-variant,
// returns void). Falls back to the plain op when nothing is batched at the
// current functorch level; otherwise forwards to batch_rule.
template <typename batch_rule_t, batch_rule_t batch_rule>
void _foreach_maximum_Scalar_out_generated_plumbing(at::TensorList self, const at::Scalar & scalar, at::TensorList out) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(out, cur_level)) {
    return at::_ops::_foreach_maximum_Scalar_out::call(self, scalar, out);
  }

  batch_rule(self, scalar, out);
}
// Generated vmap plumbing for at::_ops::_foreach_maximum_List_out (out-variant,
// returns void). Falls back to the plain op when nothing is batched at the
// current functorch level; otherwise forwards to batch_rule.
template <typename batch_rule_t, batch_rule_t batch_rule>
void _foreach_maximum_List_out_generated_plumbing(at::TensorList self, at::TensorList other, at::TensorList out) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level) && !isBatchedAtLevel(out, cur_level)) {
    return at::_ops::_foreach_maximum_List_out::call(self, other, out);
  }

  batch_rule(self, other, out);
}
// Generated vmap plumbing for at::_ops::_foreach_maximum_ScalarList_out (out-variant,
// returns void). Falls back to the plain op when nothing is batched at the
// current functorch level; otherwise forwards to batch_rule.
template <typename batch_rule_t, batch_rule_t batch_rule>
void _foreach_maximum_ScalarList_out_generated_plumbing(at::TensorList self, at::ArrayRef<at::Scalar> scalars, at::TensorList out) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(out, cur_level)) {
    return at::_ops::_foreach_maximum_ScalarList_out::call(self, scalars, out);
  }

  batch_rule(self, scalars, out);
}
// Generated vmap plumbing for at::_ops::_foreach_minimum_Scalar_out (out-variant,
// returns void). Falls back to the plain op when nothing is batched at the
// current functorch level; otherwise forwards to batch_rule.
template <typename batch_rule_t, batch_rule_t batch_rule>
void _foreach_minimum_Scalar_out_generated_plumbing(at::TensorList self, const at::Scalar & scalar, at::TensorList out) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(out, cur_level)) {
    return at::_ops::_foreach_minimum_Scalar_out::call(self, scalar, out);
  }

  batch_rule(self, scalar, out);
}
// Generated vmap plumbing for at::_ops::_foreach_minimum_List_out (out-variant,
// returns void). Falls back to the plain op when nothing is batched at the
// current functorch level; otherwise forwards to batch_rule.
template <typename batch_rule_t, batch_rule_t batch_rule>
void _foreach_minimum_List_out_generated_plumbing(at::TensorList self, at::TensorList other, at::TensorList out) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level) && !isBatchedAtLevel(out, cur_level)) {
    return at::_ops::_foreach_minimum_List_out::call(self, other, out);
  }

  batch_rule(self, other, out);
}
// Generated vmap plumbing for at::_ops::_foreach_minimum_ScalarList_out (out-variant,
// returns void). Falls back to the plain op when nothing is batched at the
// current functorch level; otherwise forwards to batch_rule.
template <typename batch_rule_t, batch_rule_t batch_rule>
void _foreach_minimum_ScalarList_out_generated_plumbing(at::TensorList self, at::ArrayRef<at::Scalar> scalars, at::TensorList out) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(out, cur_level)) {
    return at::_ops::_foreach_minimum_ScalarList_out::call(self, scalars, out);
  }

  batch_rule(self, scalars, out);
}
// Generated vmap plumbing for at::_ops::_foreach_addcdiv_Scalar_out (out-variant,
// returns void). Falls back to the plain op when nothing is batched at the
// current functorch level; otherwise forwards to batch_rule.
template <typename batch_rule_t, batch_rule_t batch_rule>
void _foreach_addcdiv_Scalar_out_generated_plumbing(at::TensorList self, at::TensorList tensor1, at::TensorList tensor2, const at::Scalar & value, at::TensorList out) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(tensor1, cur_level) && !isBatchedAtLevel(tensor2, cur_level) && !isBatchedAtLevel(out, cur_level)) {
    return at::_ops::_foreach_addcdiv_Scalar_out::call(self, tensor1, tensor2, value, out);
  }

  batch_rule(self, tensor1, tensor2, value, out);
}
// Generated vmap plumbing for at::_ops::_foreach_addcdiv_ScalarList_out (out-variant,
// returns void). Falls back to the plain op when nothing is batched at the
// current functorch level; otherwise forwards to batch_rule.
template <typename batch_rule_t, batch_rule_t batch_rule>
void _foreach_addcdiv_ScalarList_out_generated_plumbing(at::TensorList self, at::TensorList tensor1, at::TensorList tensor2, at::ArrayRef<at::Scalar> scalars, at::TensorList out) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(tensor1, cur_level) && !isBatchedAtLevel(tensor2, cur_level) && !isBatchedAtLevel(out, cur_level)) {
    return at::_ops::_foreach_addcdiv_ScalarList_out::call(self, tensor1, tensor2, scalars, out);
  }

  batch_rule(self, tensor1, tensor2, scalars, out);
}
// Generated vmap plumbing for at::_ops::_foreach_addcdiv_Tensor_out (out-variant,
// returns void). Falls back to the plain op when nothing is batched at the
// current functorch level; otherwise forwards to batch_rule.
template <typename batch_rule_t, batch_rule_t batch_rule>
void _foreach_addcdiv_Tensor_out_generated_plumbing(at::TensorList self, at::TensorList tensor1, at::TensorList tensor2, const at::Tensor & scalars, at::TensorList out) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(tensor1, cur_level) && !isBatchedAtLevel(tensor2, cur_level) && !isBatchedAtLevel(scalars, cur_level) && !isBatchedAtLevel(out, cur_level)) {
    return at::_ops::_foreach_addcdiv_Tensor_out::call(self, tensor1, tensor2, scalars, out);
  }
  // `scalars` is a plain Tensor, so split it into (value, batch-dim) for the rule.
  auto [scalars_value, scalars_bdim] = unwrapTensorAtLevel(scalars, cur_level);
  batch_rule(self, tensor1, tensor2, scalars_value, scalars_bdim, out);
}
// Generated vmap plumbing for at::_ops::_foreach_addcmul_Scalar_out (out-variant,
// returns void). Falls back to the plain op when nothing is batched at the
// current functorch level; otherwise forwards to batch_rule.
template <typename batch_rule_t, batch_rule_t batch_rule>
void _foreach_addcmul_Scalar_out_generated_plumbing(at::TensorList self, at::TensorList tensor1, at::TensorList tensor2, const at::Scalar & value, at::TensorList out) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(tensor1, cur_level) && !isBatchedAtLevel(tensor2, cur_level) && !isBatchedAtLevel(out, cur_level)) {
    return at::_ops::_foreach_addcmul_Scalar_out::call(self, tensor1, tensor2, value, out);
  }

  batch_rule(self, tensor1, tensor2, value, out);
}
// Generated vmap plumbing for at::_ops::_foreach_addcmul_ScalarList_out (out-variant,
// returns void). Falls back to the plain op when nothing is batched at the
// current functorch level; otherwise forwards to batch_rule.
template <typename batch_rule_t, batch_rule_t batch_rule>
void _foreach_addcmul_ScalarList_out_generated_plumbing(at::TensorList self, at::TensorList tensor1, at::TensorList tensor2, at::ArrayRef<at::Scalar> scalars, at::TensorList out) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(tensor1, cur_level) && !isBatchedAtLevel(tensor2, cur_level) && !isBatchedAtLevel(out, cur_level)) {
    return at::_ops::_foreach_addcmul_ScalarList_out::call(self, tensor1, tensor2, scalars, out);
  }

  batch_rule(self, tensor1, tensor2, scalars, out);
}
// Generated vmap plumbing for at::_ops::_foreach_addcmul_Tensor_out (out-variant,
// returns void). Falls back to the plain op when nothing is batched at the
// current functorch level; otherwise forwards to batch_rule.
template <typename batch_rule_t, batch_rule_t batch_rule>
void _foreach_addcmul_Tensor_out_generated_plumbing(at::TensorList self, at::TensorList tensor1, at::TensorList tensor2, const at::Tensor & scalars, at::TensorList out) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(tensor1, cur_level) && !isBatchedAtLevel(tensor2, cur_level) && !isBatchedAtLevel(scalars, cur_level) && !isBatchedAtLevel(out, cur_level)) {
    return at::_ops::_foreach_addcmul_Tensor_out::call(self, tensor1, tensor2, scalars, out);
  }
  // `scalars` is a plain Tensor, so split it into (value, batch-dim) for the rule.
  auto [scalars_value, scalars_bdim] = unwrapTensorAtLevel(scalars, cur_level);
  batch_rule(self, tensor1, tensor2, scalars_value, scalars_bdim, out);
}
// Generated vmap plumbing for at::_ops::_foreach_abs_out (out-variant,
// returns void). Falls back to the plain op when nothing is batched at the
// current functorch level; otherwise forwards to batch_rule.
template <typename batch_rule_t, batch_rule_t batch_rule>
void _foreach_abs_out_generated_plumbing(at::TensorList self, at::TensorList out) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(out, cur_level)) {
    return at::_ops::_foreach_abs_out::call(self, out);
  }

  batch_rule(self, out);
}
// Generated vmap plumbing for at::_ops::_foreach_acos_out (out-variant,
// returns void). Falls back to the plain op when nothing is batched at the
// current functorch level; otherwise forwards to batch_rule.
template <typename batch_rule_t, batch_rule_t batch_rule>
void _foreach_acos_out_generated_plumbing(at::TensorList self, at::TensorList out) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(out, cur_level)) {
    return at::_ops::_foreach_acos_out::call(self, out);
  }

  batch_rule(self, out);
}
// Generated vmap plumbing for at::_ops::_foreach_asin_out (out-variant,
// returns void). Falls back to the plain op when nothing is batched at the
// current functorch level; otherwise forwards to batch_rule.
template <typename batch_rule_t, batch_rule_t batch_rule>
void _foreach_asin_out_generated_plumbing(at::TensorList self, at::TensorList out) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(out, cur_level)) {
    return at::_ops::_foreach_asin_out::call(self, out);
  }

  batch_rule(self, out);
}
// Generated vmap plumbing for at::_ops::_foreach_atan_out (out-variant,
// returns void). Falls back to the plain op when nothing is batched at the
// current functorch level; otherwise forwards to batch_rule.
template <typename batch_rule_t, batch_rule_t batch_rule>
void _foreach_atan_out_generated_plumbing(at::TensorList self, at::TensorList out) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(out, cur_level)) {
    return at::_ops::_foreach_atan_out::call(self, out);
  }

  batch_rule(self, out);
}
// Generated vmap plumbing for at::_ops::_foreach_ceil_out (out-variant,
// returns void). Falls back to the plain op when nothing is batched at the
// current functorch level; otherwise forwards to batch_rule.
template <typename batch_rule_t, batch_rule_t batch_rule>
void _foreach_ceil_out_generated_plumbing(at::TensorList self, at::TensorList out) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(out, cur_level)) {
    return at::_ops::_foreach_ceil_out::call(self, out);
  }

  batch_rule(self, out);
}
// Generated vmap plumbing for at::_ops::_foreach_cos_out (out-variant,
// returns void). Falls back to the plain op when nothing is batched at the
// current functorch level; otherwise forwards to batch_rule.
template <typename batch_rule_t, batch_rule_t batch_rule>
void _foreach_cos_out_generated_plumbing(at::TensorList self, at::TensorList out) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(out, cur_level)) {
    return at::_ops::_foreach_cos_out::call(self, out);
  }

  batch_rule(self, out);
}
// Generated vmap plumbing for at::_ops::_foreach_cosh_out (out-variant,
// returns void). Falls back to the plain op when nothing is batched at the
// current functorch level; otherwise forwards to batch_rule.
template <typename batch_rule_t, batch_rule_t batch_rule>
void _foreach_cosh_out_generated_plumbing(at::TensorList self, at::TensorList out) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(out, cur_level)) {
    return at::_ops::_foreach_cosh_out::call(self, out);
  }

  batch_rule(self, out);
}
// Generated vmap plumbing for at::_ops::_foreach_erf_out (out-variant,
// returns void). Falls back to the plain op when nothing is batched at the
// current functorch level; otherwise forwards to batch_rule.
template <typename batch_rule_t, batch_rule_t batch_rule>
void _foreach_erf_out_generated_plumbing(at::TensorList self, at::TensorList out) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(out, cur_level)) {
    return at::_ops::_foreach_erf_out::call(self, out);
  }

  batch_rule(self, out);
}
// Generated vmap plumbing for at::_ops::_foreach_erfc_out (out-variant,
// returns void). Falls back to the plain op when nothing is batched at the
// current functorch level; otherwise forwards to batch_rule.
template <typename batch_rule_t, batch_rule_t batch_rule>
void _foreach_erfc_out_generated_plumbing(at::TensorList self, at::TensorList out) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(out, cur_level)) {
    return at::_ops::_foreach_erfc_out::call(self, out);
  }

  batch_rule(self, out);
}
// Generated vmap plumbing for at::_ops::_foreach_exp_out (out-variant,
// returns void). Falls back to the plain op when nothing is batched at the
// current functorch level; otherwise forwards to batch_rule.
template <typename batch_rule_t, batch_rule_t batch_rule>
void _foreach_exp_out_generated_plumbing(at::TensorList self, at::TensorList out) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(out, cur_level)) {
    return at::_ops::_foreach_exp_out::call(self, out);
  }

  batch_rule(self, out);
}
// Generated vmap plumbing for at::_ops::_foreach_expm1_out (out-variant,
// returns void). Falls back to the plain op when nothing is batched at the
// current functorch level; otherwise forwards to batch_rule.
template <typename batch_rule_t, batch_rule_t batch_rule>
void _foreach_expm1_out_generated_plumbing(at::TensorList self, at::TensorList out) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(out, cur_level)) {
    return at::_ops::_foreach_expm1_out::call(self, out);
  }

  batch_rule(self, out);
}
// Generated vmap plumbing for at::_ops::_foreach_floor_out (out-variant,
// returns void). Falls back to the plain op when nothing is batched at the
// current functorch level; otherwise forwards to batch_rule.
template <typename batch_rule_t, batch_rule_t batch_rule>
void _foreach_floor_out_generated_plumbing(at::TensorList self, at::TensorList out) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(out, cur_level)) {
    return at::_ops::_foreach_floor_out::call(self, out);
  }

  batch_rule(self, out);
}
// Generated vmap plumbing for at::_ops::_foreach_frac_out (out-variant,
// returns void). Falls back to the plain op when nothing is batched at the
// current functorch level; otherwise forwards to batch_rule.
template <typename batch_rule_t, batch_rule_t batch_rule>
void _foreach_frac_out_generated_plumbing(at::TensorList self, at::TensorList out) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(out, cur_level)) {
    return at::_ops::_foreach_frac_out::call(self, out);
  }

  batch_rule(self, out);
}
// Generated vmap plumbing for at::_ops::_foreach_lerp_List_out (out-variant,
// returns void). Falls back to the plain op when nothing is batched at the
// current functorch level; otherwise forwards to batch_rule.
template <typename batch_rule_t, batch_rule_t batch_rule>
void _foreach_lerp_List_out_generated_plumbing(at::TensorList self, at::TensorList tensors1, at::TensorList weights, at::TensorList out) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(tensors1, cur_level) && !isBatchedAtLevel(weights, cur_level) && !isBatchedAtLevel(out, cur_level)) {
    return at::_ops::_foreach_lerp_List_out::call(self, tensors1, weights, out);
  }

  batch_rule(self, tensors1, weights, out);
}
// Generated vmap plumbing for at::_ops::_foreach_lerp_Scalar_out (out-variant,
// returns void). Falls back to the plain op when nothing is batched at the
// current functorch level; otherwise forwards to batch_rule.
template <typename batch_rule_t, batch_rule_t batch_rule>
void _foreach_lerp_Scalar_out_generated_plumbing(at::TensorList self, at::TensorList tensors1, const at::Scalar & weight, at::TensorList out) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(tensors1, cur_level) && !isBatchedAtLevel(out, cur_level)) {
    return at::_ops::_foreach_lerp_Scalar_out::call(self, tensors1, weight, out);
  }

  batch_rule(self, tensors1, weight, out);
}
// Generated vmap plumbing for at::_ops::_foreach_lerp_ScalarList_out (out-variant,
// returns void). Falls back to the plain op when nothing is batched at the
// current functorch level; otherwise forwards to batch_rule.
template <typename batch_rule_t, batch_rule_t batch_rule>
void _foreach_lerp_ScalarList_out_generated_plumbing(at::TensorList self, at::TensorList tensors1, at::ArrayRef<at::Scalar> weight, at::TensorList out) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(tensors1, cur_level) && !isBatchedAtLevel(out, cur_level)) {
    return at::_ops::_foreach_lerp_ScalarList_out::call(self, tensors1, weight, out);
  }

  batch_rule(self, tensors1, weight, out);
}
// Generated vmap plumbing for at::_ops::_foreach_lgamma_out (out-variant,
// returns void). Falls back to the plain op when nothing is batched at the
// current functorch level; otherwise forwards to batch_rule.
template <typename batch_rule_t, batch_rule_t batch_rule>
void _foreach_lgamma_out_generated_plumbing(at::TensorList self, at::TensorList out) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(out, cur_level)) {
    return at::_ops::_foreach_lgamma_out::call(self, out);
  }

  batch_rule(self, out);
}
// Generated vmap plumbing for at::_ops::_foreach_log_out (out-variant,
// returns void). Falls back to the plain op when nothing is batched at the
// current functorch level; otherwise forwards to batch_rule.
template <typename batch_rule_t, batch_rule_t batch_rule>
void _foreach_log_out_generated_plumbing(at::TensorList self, at::TensorList out) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(out, cur_level)) {
    return at::_ops::_foreach_log_out::call(self, out);
  }

  batch_rule(self, out);
}
// Generated vmap plumbing for at::_ops::_foreach_log10_out (out-variant,
// returns void). Falls back to the plain op when nothing is batched at the
// current functorch level; otherwise forwards to batch_rule.
template <typename batch_rule_t, batch_rule_t batch_rule>
void _foreach_log10_out_generated_plumbing(at::TensorList self, at::TensorList out) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(out, cur_level)) {
    return at::_ops::_foreach_log10_out::call(self, out);
  }

  batch_rule(self, out);
}
// Generated vmap plumbing for at::_ops::_foreach_log1p_out (out-variant,
// returns void). Falls back to the plain op when nothing is batched at the
// current functorch level; otherwise forwards to batch_rule.
template <typename batch_rule_t, batch_rule_t batch_rule>
void _foreach_log1p_out_generated_plumbing(at::TensorList self, at::TensorList out) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(out, cur_level)) {
    return at::_ops::_foreach_log1p_out::call(self, out);
  }

  batch_rule(self, out);
}
// Generated vmap plumbing for at::_ops::_foreach_log2_out (out-variant,
// returns void). Falls back to the plain op when nothing is batched at the
// current functorch level; otherwise forwards to batch_rule.
template <typename batch_rule_t, batch_rule_t batch_rule>
void _foreach_log2_out_generated_plumbing(at::TensorList self, at::TensorList out) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(out, cur_level)) {
    return at::_ops::_foreach_log2_out::call(self, out);
  }

  batch_rule(self, out);
}
// Generated vmap plumbing for at::_ops::_foreach_max_out (out-variant,
// returns void). Falls back to the plain op when nothing is batched at the
// current functorch level; otherwise forwards to batch_rule.
template <typename batch_rule_t, batch_rule_t batch_rule>
void _foreach_max_out_generated_plumbing(at::TensorList self, at::TensorList out) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(out, cur_level)) {
    return at::_ops::_foreach_max_out::call(self, out);
  }

  batch_rule(self, out);
}
// Generated vmap plumbing for at::_ops::_foreach_neg_out (out-variant,
// returns void). Falls back to the plain op when nothing is batched at the
// current functorch level; otherwise forwards to batch_rule.
template <typename batch_rule_t, batch_rule_t batch_rule>
void _foreach_neg_out_generated_plumbing(at::TensorList self, at::TensorList out) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(out, cur_level)) {
    return at::_ops::_foreach_neg_out::call(self, out);
  }

  batch_rule(self, out);
}
// Generated vmap plumbing for at::_ops::_foreach_norm_Scalar_out (out-variant,
// returns void). Falls back to the plain op when nothing is batched at the
// current functorch level; otherwise forwards to batch_rule.
template <typename batch_rule_t, batch_rule_t batch_rule>
void _foreach_norm_Scalar_out_generated_plumbing(at::TensorList self, const at::Scalar & ord, ::std::optional<at::ScalarType> dtype, at::TensorList out) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(out, cur_level)) {
    return at::_ops::_foreach_norm_Scalar_out::call(self, ord, dtype, out);
  }

  batch_rule(self, ord, dtype, out);
}
27521 template <typename batch_rule_t, batch_rule_t batch_rule>
27522 void _foreach_pow_List_out_generated_plumbing(at::TensorList self, at::TensorList exponent, at::TensorList out) {
27523   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
27524   auto maybe_layer = maybeCurrentDynamicLayer();
27525   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
27526   int64_t cur_level = maybe_layer->layerId();
27527   if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(exponent, cur_level) && !isBatchedAtLevel(out, cur_level)) {
27528     return at::_ops::_foreach_pow_List_out::call(self, exponent, out);
27529   }
27530 
27531   batch_rule(self, exponent, out);
27532 }
27533 template <typename batch_rule_t, batch_rule_t batch_rule>
27534 void _foreach_pow_Scalar_out_generated_plumbing(at::TensorList self, const at::Scalar & exponent, at::TensorList out) {
27535   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
27536   auto maybe_layer = maybeCurrentDynamicLayer();
27537   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
27538   int64_t cur_level = maybe_layer->layerId();
27539   if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(out, cur_level)) {
27540     return at::_ops::_foreach_pow_Scalar_out::call(self, exponent, out);
27541   }
27542 
27543   batch_rule(self, exponent, out);
27544 }
27545 template <typename batch_rule_t, batch_rule_t batch_rule>
27546 void _foreach_pow_ScalarList_out_generated_plumbing(at::TensorList self, at::ArrayRef<at::Scalar> exponent, at::TensorList out) {
27547   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
27548   auto maybe_layer = maybeCurrentDynamicLayer();
27549   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
27550   int64_t cur_level = maybe_layer->layerId();
27551   if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(out, cur_level)) {
27552     return at::_ops::_foreach_pow_ScalarList_out::call(self, exponent, out);
27553   }
27554 
27555   batch_rule(self, exponent, out);
27556 }
27557 template <typename batch_rule_t, batch_rule_t batch_rule>
27558 void _foreach_reciprocal_out_generated_plumbing(at::TensorList self, at::TensorList out) {
27559   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
27560   auto maybe_layer = maybeCurrentDynamicLayer();
27561   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
27562   int64_t cur_level = maybe_layer->layerId();
27563   if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(out, cur_level)) {
27564     return at::_ops::_foreach_reciprocal_out::call(self, out);
27565   }
27566 
27567   batch_rule(self, out);
27568 }
27569 template <typename batch_rule_t, batch_rule_t batch_rule>
27570 void _foreach_round_out_generated_plumbing(at::TensorList self, at::TensorList out) {
27571   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
27572   auto maybe_layer = maybeCurrentDynamicLayer();
27573   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
27574   int64_t cur_level = maybe_layer->layerId();
27575   if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(out, cur_level)) {
27576     return at::_ops::_foreach_round_out::call(self, out);
27577   }
27578 
27579   batch_rule(self, out);
27580 }
27581 template <typename batch_rule_t, batch_rule_t batch_rule>
27582 void _foreach_rsqrt_out_generated_plumbing(at::TensorList self, at::TensorList out) {
27583   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
27584   auto maybe_layer = maybeCurrentDynamicLayer();
27585   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
27586   int64_t cur_level = maybe_layer->layerId();
27587   if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(out, cur_level)) {
27588     return at::_ops::_foreach_rsqrt_out::call(self, out);
27589   }
27590 
27591   batch_rule(self, out);
27592 }
27593 template <typename batch_rule_t, batch_rule_t batch_rule>
27594 void _foreach_sigmoid_out_generated_plumbing(at::TensorList self, at::TensorList out) {
27595   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
27596   auto maybe_layer = maybeCurrentDynamicLayer();
27597   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
27598   int64_t cur_level = maybe_layer->layerId();
27599   if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(out, cur_level)) {
27600     return at::_ops::_foreach_sigmoid_out::call(self, out);
27601   }
27602 
27603   batch_rule(self, out);
27604 }
27605 template <typename batch_rule_t, batch_rule_t batch_rule>
27606 void _foreach_sign_out_generated_plumbing(at::TensorList self, at::TensorList out) {
27607   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
27608   auto maybe_layer = maybeCurrentDynamicLayer();
27609   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
27610   int64_t cur_level = maybe_layer->layerId();
27611   if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(out, cur_level)) {
27612     return at::_ops::_foreach_sign_out::call(self, out);
27613   }
27614 
27615   batch_rule(self, out);
27616 }
27617 template <typename batch_rule_t, batch_rule_t batch_rule>
27618 void _foreach_sin_out_generated_plumbing(at::TensorList self, at::TensorList out) {
27619   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
27620   auto maybe_layer = maybeCurrentDynamicLayer();
27621   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
27622   int64_t cur_level = maybe_layer->layerId();
27623   if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(out, cur_level)) {
27624     return at::_ops::_foreach_sin_out::call(self, out);
27625   }
27626 
27627   batch_rule(self, out);
27628 }
27629 template <typename batch_rule_t, batch_rule_t batch_rule>
27630 void _foreach_sinh_out_generated_plumbing(at::TensorList self, at::TensorList out) {
27631   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
27632   auto maybe_layer = maybeCurrentDynamicLayer();
27633   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
27634   int64_t cur_level = maybe_layer->layerId();
27635   if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(out, cur_level)) {
27636     return at::_ops::_foreach_sinh_out::call(self, out);
27637   }
27638 
27639   batch_rule(self, out);
27640 }
27641 template <typename batch_rule_t, batch_rule_t batch_rule>
27642 void _foreach_sqrt_out_generated_plumbing(at::TensorList self, at::TensorList out) {
27643   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
27644   auto maybe_layer = maybeCurrentDynamicLayer();
27645   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
27646   int64_t cur_level = maybe_layer->layerId();
27647   if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(out, cur_level)) {
27648     return at::_ops::_foreach_sqrt_out::call(self, out);
27649   }
27650 
27651   batch_rule(self, out);
27652 }
27653 template <typename batch_rule_t, batch_rule_t batch_rule>
27654 void _foreach_tan_out_generated_plumbing(at::TensorList self, at::TensorList out) {
27655   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
27656   auto maybe_layer = maybeCurrentDynamicLayer();
27657   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
27658   int64_t cur_level = maybe_layer->layerId();
27659   if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(out, cur_level)) {
27660     return at::_ops::_foreach_tan_out::call(self, out);
27661   }
27662 
27663   batch_rule(self, out);
27664 }
27665 template <typename batch_rule_t, batch_rule_t batch_rule>
27666 void _foreach_tanh_out_generated_plumbing(at::TensorList self, at::TensorList out) {
27667   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
27668   auto maybe_layer = maybeCurrentDynamicLayer();
27669   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
27670   int64_t cur_level = maybe_layer->layerId();
27671   if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(out, cur_level)) {
27672     return at::_ops::_foreach_tanh_out::call(self, out);
27673   }
27674 
27675   batch_rule(self, out);
27676 }
27677 template <typename batch_rule_t, batch_rule_t batch_rule>
27678 void _foreach_trunc_out_generated_plumbing(at::TensorList self, at::TensorList out) {
27679   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
27680   auto maybe_layer = maybeCurrentDynamicLayer();
27681   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
27682   int64_t cur_level = maybe_layer->layerId();
27683   if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(out, cur_level)) {
27684     return at::_ops::_foreach_trunc_out::call(self, out);
27685   }
27686 
27687   batch_rule(self, out);
27688 }
27689 template <typename batch_rule_t, batch_rule_t batch_rule>
27690 void _foreach_zero_out_generated_plumbing(at::TensorList self, at::TensorList out) {
27691   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
27692   auto maybe_layer = maybeCurrentDynamicLayer();
27693   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
27694   int64_t cur_level = maybe_layer->layerId();
27695   if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(out, cur_level)) {
27696     return at::_ops::_foreach_zero_out::call(self, out);
27697   }
27698 
27699   batch_rule(self, out);
27700 }
27701 template <typename batch_rule_t, batch_rule_t batch_rule>
27702 ::std::vector<at::Tensor> _foreach_zero_generated_plumbing(at::TensorList self) {
27703   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
27704   auto maybe_layer = maybeCurrentDynamicLayer();
27705   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
27706   int64_t cur_level = maybe_layer->layerId();
27707   if (!isBatchedAtLevel(self, cur_level)) {
27708     return at::_ops::_foreach_zero::call(self);
27709   }
27710 
27711   auto results = batch_rule(self);
27712   return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
27713 }
27714 template <typename batch_rule_t, batch_rule_t batch_rule>
27715 void _foreach_copy_out_generated_plumbing(at::TensorList self, at::TensorList src, bool non_blocking, at::TensorList out) {
27716   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
27717   auto maybe_layer = maybeCurrentDynamicLayer();
27718   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
27719   int64_t cur_level = maybe_layer->layerId();
27720   if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(src, cur_level) && !isBatchedAtLevel(out, cur_level)) {
27721     return at::_ops::_foreach_copy_out::call(self, src, non_blocking, out);
27722   }
27723 
27724   batch_rule(self, src, non_blocking, out);
27725 }
27726 template <typename batch_rule_t, batch_rule_t batch_rule>
27727 ::std::tuple<at::Tensor,at::Tensor> rrelu_with_noise_functional_generated_plumbing(const at::Tensor & self, const at::Tensor & noise, const at::Scalar & lower, const at::Scalar & upper, bool training, ::std::optional<at::Generator> generator) {
27728   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
27729   auto maybe_layer = maybeCurrentDynamicLayer();
27730   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
27731   int64_t cur_level = maybe_layer->layerId();
27732   if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(noise, cur_level)) {
27733     return at::_ops::rrelu_with_noise_functional::call(self, noise, lower, upper, training, generator);
27734   }
27735   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
27736   auto [noise_value, noise_bdim] = unwrapTensorAtLevel(noise, cur_level);
27737   auto results = batch_rule(self_value, self_bdim, noise_value, noise_bdim, lower, upper, training, generator);
27738   return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
27739 }
27740 template <typename batch_rule_t, batch_rule_t batch_rule>
27741 void _fused_adam_out_generated_plumbing(at::TensorList self, at::TensorList grads, at::TensorList exp_avgs, at::TensorList exp_avg_sqs, at::TensorList max_exp_avg_sqs, at::TensorList state_steps, double lr, double beta1, double beta2, double weight_decay, double eps, bool amsgrad, bool maximize, const ::std::optional<at::Tensor> & grad_scale, const ::std::optional<at::Tensor> & found_inf, at::TensorList out) {
27742   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
27743   auto maybe_layer = maybeCurrentDynamicLayer();
27744   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
27745   int64_t cur_level = maybe_layer->layerId();
27746   if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(grads, cur_level) && !isBatchedAtLevel(exp_avgs, cur_level) && !isBatchedAtLevel(exp_avg_sqs, cur_level) && !isBatchedAtLevel(max_exp_avg_sqs, cur_level) && !isBatchedAtLevel(state_steps, cur_level) && !isBatchedAtLevel(grad_scale, cur_level) && !isBatchedAtLevel(found_inf, cur_level) && !isBatchedAtLevel(out, cur_level)) {
27747     return at::_ops::_fused_adam_out::call(self, grads, exp_avgs, exp_avg_sqs, max_exp_avg_sqs, state_steps, lr, beta1, beta2, weight_decay, eps, amsgrad, maximize, grad_scale, found_inf, out);
27748   }
27749   std::optional<Tensor> grad_scale_value;
27750   std::optional<int64_t> grad_scale_bdim;
27751   if (grad_scale) {
27752       std::tie(grad_scale_value, grad_scale_bdim) = unwrapTensorAtLevel(grad_scale.value(), cur_level);
27753   }
27754   std::optional<Tensor> found_inf_value;
27755   std::optional<int64_t> found_inf_bdim;
27756   if (found_inf) {
27757       std::tie(found_inf_value, found_inf_bdim) = unwrapTensorAtLevel(found_inf.value(), cur_level);
27758   }
27759   batch_rule(self, grads, exp_avgs, exp_avg_sqs, max_exp_avg_sqs, state_steps, lr, beta1, beta2, weight_decay, eps, amsgrad, maximize, grad_scale_value, grad_scale_bdim, found_inf_value, found_inf_bdim, out);
27760 }
27761 template <typename batch_rule_t, batch_rule_t batch_rule>
27762 ::std::tuple<::std::vector<at::Tensor>,::std::vector<at::Tensor>,::std::vector<at::Tensor>,::std::vector<at::Tensor>,::std::vector<at::Tensor>> _fused_adam_generated_plumbing(at::TensorList self, at::TensorList grads, at::TensorList exp_avgs, at::TensorList exp_avg_sqs, at::TensorList max_exp_avg_sqs, at::TensorList state_steps, double lr, double beta1, double beta2, double weight_decay, double eps, bool amsgrad, bool maximize, const ::std::optional<at::Tensor> & grad_scale, const ::std::optional<at::Tensor> & found_inf) {
27763   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
27764   auto maybe_layer = maybeCurrentDynamicLayer();
27765   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
27766   int64_t cur_level = maybe_layer->layerId();
27767   if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(grads, cur_level) && !isBatchedAtLevel(exp_avgs, cur_level) && !isBatchedAtLevel(exp_avg_sqs, cur_level) && !isBatchedAtLevel(max_exp_avg_sqs, cur_level) && !isBatchedAtLevel(state_steps, cur_level) && !isBatchedAtLevel(grad_scale, cur_level) && !isBatchedAtLevel(found_inf, cur_level)) {
27768     return at::_ops::_fused_adam::call(self, grads, exp_avgs, exp_avg_sqs, max_exp_avg_sqs, state_steps, lr, beta1, beta2, weight_decay, eps, amsgrad, maximize, grad_scale, found_inf);
27769   }
27770   std::optional<Tensor> grad_scale_value;
27771   std::optional<int64_t> grad_scale_bdim;
27772   if (grad_scale) {
27773       std::tie(grad_scale_value, grad_scale_bdim) = unwrapTensorAtLevel(grad_scale.value(), cur_level);
27774   }
27775   std::optional<Tensor> found_inf_value;
27776   std::optional<int64_t> found_inf_bdim;
27777   if (found_inf) {
27778       std::tie(found_inf_value, found_inf_bdim) = unwrapTensorAtLevel(found_inf.value(), cur_level);
27779   }
27780   auto results = batch_rule(self, grads, exp_avgs, exp_avg_sqs, max_exp_avg_sqs, state_steps, lr, beta1, beta2, weight_decay, eps, amsgrad, maximize, grad_scale_value, grad_scale_bdim, found_inf_value, found_inf_bdim);
27781   return std::make_tuple(makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level), makeBatchedVector(std::get<2>(results), std::get<3>(results), cur_level), makeBatchedVector(std::get<4>(results), std::get<5>(results), cur_level), makeBatchedVector(std::get<6>(results), std::get<7>(results), cur_level), makeBatchedVector(std::get<8>(results), std::get<9>(results), cur_level));
27782 }
27783 template <typename batch_rule_t, batch_rule_t batch_rule>
27784 void _fused_adam_tensor_lr_out_generated_plumbing(at::TensorList self, at::TensorList grads, at::TensorList exp_avgs, at::TensorList exp_avg_sqs, at::TensorList max_exp_avg_sqs, at::TensorList state_steps, const at::Tensor & lr, double beta1, double beta2, double weight_decay, double eps, bool amsgrad, bool maximize, const ::std::optional<at::Tensor> & grad_scale, const ::std::optional<at::Tensor> & found_inf, at::TensorList out) {
27785   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
27786   auto maybe_layer = maybeCurrentDynamicLayer();
27787   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
27788   int64_t cur_level = maybe_layer->layerId();
27789   if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(grads, cur_level) && !isBatchedAtLevel(exp_avgs, cur_level) && !isBatchedAtLevel(exp_avg_sqs, cur_level) && !isBatchedAtLevel(max_exp_avg_sqs, cur_level) && !isBatchedAtLevel(state_steps, cur_level) && !isBatchedAtLevel(lr, cur_level) && !isBatchedAtLevel(grad_scale, cur_level) && !isBatchedAtLevel(found_inf, cur_level) && !isBatchedAtLevel(out, cur_level)) {
27790     return at::_ops::_fused_adam_tensor_lr_out::call(self, grads, exp_avgs, exp_avg_sqs, max_exp_avg_sqs, state_steps, lr, beta1, beta2, weight_decay, eps, amsgrad, maximize, grad_scale, found_inf, out);
27791   }
27792   auto [lr_value, lr_bdim] = unwrapTensorAtLevel(lr, cur_level);
27793   std::optional<Tensor> grad_scale_value;
27794   std::optional<int64_t> grad_scale_bdim;
27795   if (grad_scale) {
27796       std::tie(grad_scale_value, grad_scale_bdim) = unwrapTensorAtLevel(grad_scale.value(), cur_level);
27797   }
27798   std::optional<Tensor> found_inf_value;
27799   std::optional<int64_t> found_inf_bdim;
27800   if (found_inf) {
27801       std::tie(found_inf_value, found_inf_bdim) = unwrapTensorAtLevel(found_inf.value(), cur_level);
27802   }
27803   batch_rule(self, grads, exp_avgs, exp_avg_sqs, max_exp_avg_sqs, state_steps, lr_value, lr_bdim, beta1, beta2, weight_decay, eps, amsgrad, maximize, grad_scale_value, grad_scale_bdim, found_inf_value, found_inf_bdim, out);
27804 }
27805 template <typename batch_rule_t, batch_rule_t batch_rule>
27806 ::std::tuple<::std::vector<at::Tensor>,::std::vector<at::Tensor>,::std::vector<at::Tensor>,::std::vector<at::Tensor>,::std::vector<at::Tensor>> _fused_adam_tensor_lr_generated_plumbing(at::TensorList self, at::TensorList grads, at::TensorList exp_avgs, at::TensorList exp_avg_sqs, at::TensorList max_exp_avg_sqs, at::TensorList state_steps, const at::Tensor & lr, double beta1, double beta2, double weight_decay, double eps, bool amsgrad, bool maximize, const ::std::optional<at::Tensor> & grad_scale, const ::std::optional<at::Tensor> & found_inf) {
27807   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
27808   auto maybe_layer = maybeCurrentDynamicLayer();
27809   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
27810   int64_t cur_level = maybe_layer->layerId();
27811   if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(grads, cur_level) && !isBatchedAtLevel(exp_avgs, cur_level) && !isBatchedAtLevel(exp_avg_sqs, cur_level) && !isBatchedAtLevel(max_exp_avg_sqs, cur_level) && !isBatchedAtLevel(state_steps, cur_level) && !isBatchedAtLevel(lr, cur_level) && !isBatchedAtLevel(grad_scale, cur_level) && !isBatchedAtLevel(found_inf, cur_level)) {
27812     return at::_ops::_fused_adam_tensor_lr::call(self, grads, exp_avgs, exp_avg_sqs, max_exp_avg_sqs, state_steps, lr, beta1, beta2, weight_decay, eps, amsgrad, maximize, grad_scale, found_inf);
27813   }
27814   auto [lr_value, lr_bdim] = unwrapTensorAtLevel(lr, cur_level);
27815   std::optional<Tensor> grad_scale_value;
27816   std::optional<int64_t> grad_scale_bdim;
27817   if (grad_scale) {
27818       std::tie(grad_scale_value, grad_scale_bdim) = unwrapTensorAtLevel(grad_scale.value(), cur_level);
27819   }
27820   std::optional<Tensor> found_inf_value;
27821   std::optional<int64_t> found_inf_bdim;
27822   if (found_inf) {
27823       std::tie(found_inf_value, found_inf_bdim) = unwrapTensorAtLevel(found_inf.value(), cur_level);
27824   }
27825   auto results = batch_rule(self, grads, exp_avgs, exp_avg_sqs, max_exp_avg_sqs, state_steps, lr_value, lr_bdim, beta1, beta2, weight_decay, eps, amsgrad, maximize, grad_scale_value, grad_scale_bdim, found_inf_value, found_inf_bdim);
27826   return std::make_tuple(makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level), makeBatchedVector(std::get<2>(results), std::get<3>(results), cur_level), makeBatchedVector(std::get<4>(results), std::get<5>(results), cur_level), makeBatchedVector(std::get<6>(results), std::get<7>(results), cur_level), makeBatchedVector(std::get<8>(results), std::get<9>(results), cur_level));
27827 }
27828 template <typename batch_rule_t, batch_rule_t batch_rule>
27829 void _fused_adamw_out_generated_plumbing(at::TensorList self, at::TensorList grads, at::TensorList exp_avgs, at::TensorList exp_avg_sqs, at::TensorList max_exp_avg_sqs, at::TensorList state_steps, double lr, double beta1, double beta2, double weight_decay, double eps, bool amsgrad, bool maximize, const ::std::optional<at::Tensor> & grad_scale, const ::std::optional<at::Tensor> & found_inf, at::TensorList out) {
27830   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
27831   auto maybe_layer = maybeCurrentDynamicLayer();
27832   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
27833   int64_t cur_level = maybe_layer->layerId();
27834   if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(grads, cur_level) && !isBatchedAtLevel(exp_avgs, cur_level) && !isBatchedAtLevel(exp_avg_sqs, cur_level) && !isBatchedAtLevel(max_exp_avg_sqs, cur_level) && !isBatchedAtLevel(state_steps, cur_level) && !isBatchedAtLevel(grad_scale, cur_level) && !isBatchedAtLevel(found_inf, cur_level) && !isBatchedAtLevel(out, cur_level)) {
27835     return at::_ops::_fused_adamw_out::call(self, grads, exp_avgs, exp_avg_sqs, max_exp_avg_sqs, state_steps, lr, beta1, beta2, weight_decay, eps, amsgrad, maximize, grad_scale, found_inf, out);
27836   }
27837   std::optional<Tensor> grad_scale_value;
27838   std::optional<int64_t> grad_scale_bdim;
27839   if (grad_scale) {
27840       std::tie(grad_scale_value, grad_scale_bdim) = unwrapTensorAtLevel(grad_scale.value(), cur_level);
27841   }
27842   std::optional<Tensor> found_inf_value;
27843   std::optional<int64_t> found_inf_bdim;
27844   if (found_inf) {
27845       std::tie(found_inf_value, found_inf_bdim) = unwrapTensorAtLevel(found_inf.value(), cur_level);
27846   }
27847   batch_rule(self, grads, exp_avgs, exp_avg_sqs, max_exp_avg_sqs, state_steps, lr, beta1, beta2, weight_decay, eps, amsgrad, maximize, grad_scale_value, grad_scale_bdim, found_inf_value, found_inf_bdim, out);
27848 }
27849 template <typename batch_rule_t, batch_rule_t batch_rule>
27850 ::std::tuple<::std::vector<at::Tensor>,::std::vector<at::Tensor>,::std::vector<at::Tensor>,::std::vector<at::Tensor>,::std::vector<at::Tensor>> _fused_adamw_generated_plumbing(at::TensorList self, at::TensorList grads, at::TensorList exp_avgs, at::TensorList exp_avg_sqs, at::TensorList max_exp_avg_sqs, at::TensorList state_steps, double lr, double beta1, double beta2, double weight_decay, double eps, bool amsgrad, bool maximize, const ::std::optional<at::Tensor> & grad_scale, const ::std::optional<at::Tensor> & found_inf) {
27851   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
27852   auto maybe_layer = maybeCurrentDynamicLayer();
27853   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
27854   int64_t cur_level = maybe_layer->layerId();
27855   if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(grads, cur_level) && !isBatchedAtLevel(exp_avgs, cur_level) && !isBatchedAtLevel(exp_avg_sqs, cur_level) && !isBatchedAtLevel(max_exp_avg_sqs, cur_level) && !isBatchedAtLevel(state_steps, cur_level) && !isBatchedAtLevel(grad_scale, cur_level) && !isBatchedAtLevel(found_inf, cur_level)) {
27856     return at::_ops::_fused_adamw::call(self, grads, exp_avgs, exp_avg_sqs, max_exp_avg_sqs, state_steps, lr, beta1, beta2, weight_decay, eps, amsgrad, maximize, grad_scale, found_inf);
27857   }
27858   std::optional<Tensor> grad_scale_value;
27859   std::optional<int64_t> grad_scale_bdim;
27860   if (grad_scale) {
27861       std::tie(grad_scale_value, grad_scale_bdim) = unwrapTensorAtLevel(grad_scale.value(), cur_level);
27862   }
27863   std::optional<Tensor> found_inf_value;
27864   std::optional<int64_t> found_inf_bdim;
27865   if (found_inf) {
27866       std::tie(found_inf_value, found_inf_bdim) = unwrapTensorAtLevel(found_inf.value(), cur_level);
27867   }
27868   auto results = batch_rule(self, grads, exp_avgs, exp_avg_sqs, max_exp_avg_sqs, state_steps, lr, beta1, beta2, weight_decay, eps, amsgrad, maximize, grad_scale_value, grad_scale_bdim, found_inf_value, found_inf_bdim);
27869   return std::make_tuple(makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level), makeBatchedVector(std::get<2>(results), std::get<3>(results), cur_level), makeBatchedVector(std::get<4>(results), std::get<5>(results), cur_level), makeBatchedVector(std::get<6>(results), std::get<7>(results), cur_level), makeBatchedVector(std::get<8>(results), std::get<9>(results), cur_level));
27870 }
27871 template <typename batch_rule_t, batch_rule_t batch_rule>
27872 void _fused_adamw_tensor_lr_out_generated_plumbing(at::TensorList self, at::TensorList grads, at::TensorList exp_avgs, at::TensorList exp_avg_sqs, at::TensorList max_exp_avg_sqs, at::TensorList state_steps, const at::Tensor & lr, double beta1, double beta2, double weight_decay, double eps, bool amsgrad, bool maximize, const ::std::optional<at::Tensor> & grad_scale, const ::std::optional<at::Tensor> & found_inf, at::TensorList out) {
27873   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
27874   auto maybe_layer = maybeCurrentDynamicLayer();
27875   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
27876   int64_t cur_level = maybe_layer->layerId();
27877   if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(grads, cur_level) && !isBatchedAtLevel(exp_avgs, cur_level) && !isBatchedAtLevel(exp_avg_sqs, cur_level) && !isBatchedAtLevel(max_exp_avg_sqs, cur_level) && !isBatchedAtLevel(state_steps, cur_level) && !isBatchedAtLevel(lr, cur_level) && !isBatchedAtLevel(grad_scale, cur_level) && !isBatchedAtLevel(found_inf, cur_level) && !isBatchedAtLevel(out, cur_level)) {
27878     return at::_ops::_fused_adamw_tensor_lr_out::call(self, grads, exp_avgs, exp_avg_sqs, max_exp_avg_sqs, state_steps, lr, beta1, beta2, weight_decay, eps, amsgrad, maximize, grad_scale, found_inf, out);
27879   }
27880   auto [lr_value, lr_bdim] = unwrapTensorAtLevel(lr, cur_level);
27881   std::optional<Tensor> grad_scale_value;
27882   std::optional<int64_t> grad_scale_bdim;
27883   if (grad_scale) {
27884       std::tie(grad_scale_value, grad_scale_bdim) = unwrapTensorAtLevel(grad_scale.value(), cur_level);
27885   }
27886   std::optional<Tensor> found_inf_value;
27887   std::optional<int64_t> found_inf_bdim;
27888   if (found_inf) {
27889       std::tie(found_inf_value, found_inf_bdim) = unwrapTensorAtLevel(found_inf.value(), cur_level);
27890   }
27891   batch_rule(self, grads, exp_avgs, exp_avg_sqs, max_exp_avg_sqs, state_steps, lr_value, lr_bdim, beta1, beta2, weight_decay, eps, amsgrad, maximize, grad_scale_value, grad_scale_bdim, found_inf_value, found_inf_bdim, out);
27892 }
27893 template <typename batch_rule_t, batch_rule_t batch_rule>
27894 ::std::tuple<::std::vector<at::Tensor>,::std::vector<at::Tensor>,::std::vector<at::Tensor>,::std::vector<at::Tensor>,::std::vector<at::Tensor>> _fused_adamw_tensor_lr_generated_plumbing(at::TensorList self, at::TensorList grads, at::TensorList exp_avgs, at::TensorList exp_avg_sqs, at::TensorList max_exp_avg_sqs, at::TensorList state_steps, const at::Tensor & lr, double beta1, double beta2, double weight_decay, double eps, bool amsgrad, bool maximize, const ::std::optional<at::Tensor> & grad_scale, const ::std::optional<at::Tensor> & found_inf) {
27895   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
27896   auto maybe_layer = maybeCurrentDynamicLayer();
27897   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
27898   int64_t cur_level = maybe_layer->layerId();
27899   if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(grads, cur_level) && !isBatchedAtLevel(exp_avgs, cur_level) && !isBatchedAtLevel(exp_avg_sqs, cur_level) && !isBatchedAtLevel(max_exp_avg_sqs, cur_level) && !isBatchedAtLevel(state_steps, cur_level) && !isBatchedAtLevel(lr, cur_level) && !isBatchedAtLevel(grad_scale, cur_level) && !isBatchedAtLevel(found_inf, cur_level)) {
27900     return at::_ops::_fused_adamw_tensor_lr::call(self, grads, exp_avgs, exp_avg_sqs, max_exp_avg_sqs, state_steps, lr, beta1, beta2, weight_decay, eps, amsgrad, maximize, grad_scale, found_inf);
27901   }
27902   auto [lr_value, lr_bdim] = unwrapTensorAtLevel(lr, cur_level);
27903   std::optional<Tensor> grad_scale_value;
27904   std::optional<int64_t> grad_scale_bdim;
27905   if (grad_scale) {
27906       std::tie(grad_scale_value, grad_scale_bdim) = unwrapTensorAtLevel(grad_scale.value(), cur_level);
27907   }
27908   std::optional<Tensor> found_inf_value;
27909   std::optional<int64_t> found_inf_bdim;
27910   if (found_inf) {
27911       std::tie(found_inf_value, found_inf_bdim) = unwrapTensorAtLevel(found_inf.value(), cur_level);
27912   }
27913   auto results = batch_rule(self, grads, exp_avgs, exp_avg_sqs, max_exp_avg_sqs, state_steps, lr_value, lr_bdim, beta1, beta2, weight_decay, eps, amsgrad, maximize, grad_scale_value, grad_scale_bdim, found_inf_value, found_inf_bdim);
27914   return std::make_tuple(makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level), makeBatchedVector(std::get<2>(results), std::get<3>(results), cur_level), makeBatchedVector(std::get<4>(results), std::get<5>(results), cur_level), makeBatchedVector(std::get<6>(results), std::get<7>(results), cur_level), makeBatchedVector(std::get<8>(results), std::get<9>(results), cur_level));
27915 }
27916 template <typename batch_rule_t, batch_rule_t batch_rule>
27917 void _fused_sgd_out_generated_plumbing(at::TensorList self, at::TensorList grads, at::TensorList momentum_buffer_list, double weight_decay, double momentum, double lr, double dampening, bool nesterov, bool maximize, bool is_first_step, const ::std::optional<at::Tensor> & grad_scale, const ::std::optional<at::Tensor> & found_inf, at::TensorList out) {
27918   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
27919   auto maybe_layer = maybeCurrentDynamicLayer();
27920   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
27921   int64_t cur_level = maybe_layer->layerId();
27922   if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(grads, cur_level) && !isBatchedAtLevel(momentum_buffer_list, cur_level) && !isBatchedAtLevel(grad_scale, cur_level) && !isBatchedAtLevel(found_inf, cur_level) && !isBatchedAtLevel(out, cur_level)) {
27923     return at::_ops::_fused_sgd_out::call(self, grads, momentum_buffer_list, weight_decay, momentum, lr, dampening, nesterov, maximize, is_first_step, grad_scale, found_inf, out);
27924   }
27925   std::optional<Tensor> grad_scale_value;
27926   std::optional<int64_t> grad_scale_bdim;
27927   if (grad_scale) {
27928       std::tie(grad_scale_value, grad_scale_bdim) = unwrapTensorAtLevel(grad_scale.value(), cur_level);
27929   }
27930   std::optional<Tensor> found_inf_value;
27931   std::optional<int64_t> found_inf_bdim;
27932   if (found_inf) {
27933       std::tie(found_inf_value, found_inf_bdim) = unwrapTensorAtLevel(found_inf.value(), cur_level);
27934   }
27935   batch_rule(self, grads, momentum_buffer_list, weight_decay, momentum, lr, dampening, nesterov, maximize, is_first_step, grad_scale_value, grad_scale_bdim, found_inf_value, found_inf_bdim, out);
27936 }
27937 template <typename batch_rule_t, batch_rule_t batch_rule>
27938 ::std::tuple<::std::vector<at::Tensor>,::std::vector<at::Tensor>,::std::vector<at::Tensor>> _fused_sgd_generated_plumbing(at::TensorList self, at::TensorList grads, at::TensorList momentum_buffer_list, double weight_decay, double momentum, double lr, double dampening, bool nesterov, bool maximize, bool is_first_step, const ::std::optional<at::Tensor> & grad_scale, const ::std::optional<at::Tensor> & found_inf) {
27939   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
27940   auto maybe_layer = maybeCurrentDynamicLayer();
27941   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
27942   int64_t cur_level = maybe_layer->layerId();
27943   if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(grads, cur_level) && !isBatchedAtLevel(momentum_buffer_list, cur_level) && !isBatchedAtLevel(grad_scale, cur_level) && !isBatchedAtLevel(found_inf, cur_level)) {
27944     return at::_ops::_fused_sgd::call(self, grads, momentum_buffer_list, weight_decay, momentum, lr, dampening, nesterov, maximize, is_first_step, grad_scale, found_inf);
27945   }
27946   std::optional<Tensor> grad_scale_value;
27947   std::optional<int64_t> grad_scale_bdim;
27948   if (grad_scale) {
27949       std::tie(grad_scale_value, grad_scale_bdim) = unwrapTensorAtLevel(grad_scale.value(), cur_level);
27950   }
27951   std::optional<Tensor> found_inf_value;
27952   std::optional<int64_t> found_inf_bdim;
27953   if (found_inf) {
27954       std::tie(found_inf_value, found_inf_bdim) = unwrapTensorAtLevel(found_inf.value(), cur_level);
27955   }
27956   auto results = batch_rule(self, grads, momentum_buffer_list, weight_decay, momentum, lr, dampening, nesterov, maximize, is_first_step, grad_scale_value, grad_scale_bdim, found_inf_value, found_inf_bdim);
27957   return std::make_tuple(makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level), makeBatchedVector(std::get<2>(results), std::get<3>(results), cur_level), makeBatchedVector(std::get<4>(results), std::get<5>(results), cur_level));
27958 }
27959 template <typename batch_rule_t, batch_rule_t batch_rule>
27960 void _fused_sgd_tensor_lr_out_generated_plumbing(at::TensorList self, at::TensorList grads, at::TensorList momentum_buffer_list, double weight_decay, double momentum, const at::Tensor & lr, double dampening, bool nesterov, bool maximize, bool is_first_step, const ::std::optional<at::Tensor> & grad_scale, const ::std::optional<at::Tensor> & found_inf, at::TensorList out) {
27961   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
27962   auto maybe_layer = maybeCurrentDynamicLayer();
27963   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
27964   int64_t cur_level = maybe_layer->layerId();
27965   if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(grads, cur_level) && !isBatchedAtLevel(momentum_buffer_list, cur_level) && !isBatchedAtLevel(lr, cur_level) && !isBatchedAtLevel(grad_scale, cur_level) && !isBatchedAtLevel(found_inf, cur_level) && !isBatchedAtLevel(out, cur_level)) {
27966     return at::_ops::_fused_sgd_tensor_lr_out::call(self, grads, momentum_buffer_list, weight_decay, momentum, lr, dampening, nesterov, maximize, is_first_step, grad_scale, found_inf, out);
27967   }
27968   auto [lr_value, lr_bdim] = unwrapTensorAtLevel(lr, cur_level);
27969   std::optional<Tensor> grad_scale_value;
27970   std::optional<int64_t> grad_scale_bdim;
27971   if (grad_scale) {
27972       std::tie(grad_scale_value, grad_scale_bdim) = unwrapTensorAtLevel(grad_scale.value(), cur_level);
27973   }
27974   std::optional<Tensor> found_inf_value;
27975   std::optional<int64_t> found_inf_bdim;
27976   if (found_inf) {
27977       std::tie(found_inf_value, found_inf_bdim) = unwrapTensorAtLevel(found_inf.value(), cur_level);
27978   }
27979   batch_rule(self, grads, momentum_buffer_list, weight_decay, momentum, lr_value, lr_bdim, dampening, nesterov, maximize, is_first_step, grad_scale_value, grad_scale_bdim, found_inf_value, found_inf_bdim, out);
27980 }
27981 template <typename batch_rule_t, batch_rule_t batch_rule>
27982 ::std::tuple<::std::vector<at::Tensor>,::std::vector<at::Tensor>,::std::vector<at::Tensor>> _fused_sgd_tensor_lr_generated_plumbing(at::TensorList self, at::TensorList grads, at::TensorList momentum_buffer_list, double weight_decay, double momentum, const at::Tensor & lr, double dampening, bool nesterov, bool maximize, bool is_first_step, const ::std::optional<at::Tensor> & grad_scale, const ::std::optional<at::Tensor> & found_inf) {
27983   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
27984   auto maybe_layer = maybeCurrentDynamicLayer();
27985   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
27986   int64_t cur_level = maybe_layer->layerId();
27987   if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(grads, cur_level) && !isBatchedAtLevel(momentum_buffer_list, cur_level) && !isBatchedAtLevel(lr, cur_level) && !isBatchedAtLevel(grad_scale, cur_level) && !isBatchedAtLevel(found_inf, cur_level)) {
27988     return at::_ops::_fused_sgd_tensor_lr::call(self, grads, momentum_buffer_list, weight_decay, momentum, lr, dampening, nesterov, maximize, is_first_step, grad_scale, found_inf);
27989   }
27990   auto [lr_value, lr_bdim] = unwrapTensorAtLevel(lr, cur_level);
27991   std::optional<Tensor> grad_scale_value;
27992   std::optional<int64_t> grad_scale_bdim;
27993   if (grad_scale) {
27994       std::tie(grad_scale_value, grad_scale_bdim) = unwrapTensorAtLevel(grad_scale.value(), cur_level);
27995   }
27996   std::optional<Tensor> found_inf_value;
27997   std::optional<int64_t> found_inf_bdim;
27998   if (found_inf) {
27999       std::tie(found_inf_value, found_inf_bdim) = unwrapTensorAtLevel(found_inf.value(), cur_level);
28000   }
28001   auto results = batch_rule(self, grads, momentum_buffer_list, weight_decay, momentum, lr_value, lr_bdim, dampening, nesterov, maximize, is_first_step, grad_scale_value, grad_scale_bdim, found_inf_value, found_inf_bdim);
28002   return std::make_tuple(makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level), makeBatchedVector(std::get<2>(results), std::get<3>(results), cur_level), makeBatchedVector(std::get<4>(results), std::get<5>(results), cur_level));
28003 }
28004 template <typename batch_rule_t, batch_rule_t batch_rule>
28005 void _fused_adagrad_out_generated_plumbing(at::TensorList self, at::TensorList grads, at::TensorList state_sums, at::TensorList state_steps, double lr, double lr_decay, double weight_decay, double eps, bool maximize, const ::std::optional<at::Tensor> & grad_scale, const ::std::optional<at::Tensor> & found_inf, at::TensorList out) {
28006   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
28007   auto maybe_layer = maybeCurrentDynamicLayer();
28008   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
28009   int64_t cur_level = maybe_layer->layerId();
28010   if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(grads, cur_level) && !isBatchedAtLevel(state_sums, cur_level) && !isBatchedAtLevel(state_steps, cur_level) && !isBatchedAtLevel(grad_scale, cur_level) && !isBatchedAtLevel(found_inf, cur_level) && !isBatchedAtLevel(out, cur_level)) {
28011     return at::_ops::_fused_adagrad_out::call(self, grads, state_sums, state_steps, lr, lr_decay, weight_decay, eps, maximize, grad_scale, found_inf, out);
28012   }
28013   std::optional<Tensor> grad_scale_value;
28014   std::optional<int64_t> grad_scale_bdim;
28015   if (grad_scale) {
28016       std::tie(grad_scale_value, grad_scale_bdim) = unwrapTensorAtLevel(grad_scale.value(), cur_level);
28017   }
28018   std::optional<Tensor> found_inf_value;
28019   std::optional<int64_t> found_inf_bdim;
28020   if (found_inf) {
28021       std::tie(found_inf_value, found_inf_bdim) = unwrapTensorAtLevel(found_inf.value(), cur_level);
28022   }
28023   batch_rule(self, grads, state_sums, state_steps, lr, lr_decay, weight_decay, eps, maximize, grad_scale_value, grad_scale_bdim, found_inf_value, found_inf_bdim, out);
28024 }
28025 template <typename batch_rule_t, batch_rule_t batch_rule>
28026 ::std::tuple<::std::vector<at::Tensor>,::std::vector<at::Tensor>,::std::vector<at::Tensor>,::std::vector<at::Tensor>> _fused_adagrad_generated_plumbing(at::TensorList self, at::TensorList grads, at::TensorList state_sums, at::TensorList state_steps, double lr, double lr_decay, double weight_decay, double eps, bool maximize, const ::std::optional<at::Tensor> & grad_scale, const ::std::optional<at::Tensor> & found_inf) {
28027   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
28028   auto maybe_layer = maybeCurrentDynamicLayer();
28029   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
28030   int64_t cur_level = maybe_layer->layerId();
28031   if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(grads, cur_level) && !isBatchedAtLevel(state_sums, cur_level) && !isBatchedAtLevel(state_steps, cur_level) && !isBatchedAtLevel(grad_scale, cur_level) && !isBatchedAtLevel(found_inf, cur_level)) {
28032     return at::_ops::_fused_adagrad::call(self, grads, state_sums, state_steps, lr, lr_decay, weight_decay, eps, maximize, grad_scale, found_inf);
28033   }
28034   std::optional<Tensor> grad_scale_value;
28035   std::optional<int64_t> grad_scale_bdim;
28036   if (grad_scale) {
28037       std::tie(grad_scale_value, grad_scale_bdim) = unwrapTensorAtLevel(grad_scale.value(), cur_level);
28038   }
28039   std::optional<Tensor> found_inf_value;
28040   std::optional<int64_t> found_inf_bdim;
28041   if (found_inf) {
28042       std::tie(found_inf_value, found_inf_bdim) = unwrapTensorAtLevel(found_inf.value(), cur_level);
28043   }
28044   auto results = batch_rule(self, grads, state_sums, state_steps, lr, lr_decay, weight_decay, eps, maximize, grad_scale_value, grad_scale_bdim, found_inf_value, found_inf_bdim);
28045   return std::make_tuple(makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level), makeBatchedVector(std::get<2>(results), std::get<3>(results), cur_level), makeBatchedVector(std::get<4>(results), std::get<5>(results), cur_level), makeBatchedVector(std::get<6>(results), std::get<7>(results), cur_level));
28046 }
28047 template <typename batch_rule_t, batch_rule_t batch_rule>
28048 void _fused_adagrad_tensor_lr_out_generated_plumbing(at::TensorList self, at::TensorList grads, at::TensorList state_sums, at::TensorList state_steps, const at::Tensor & lr, double lr_decay, double weight_decay, double eps, bool maximize, const ::std::optional<at::Tensor> & grad_scale, const ::std::optional<at::Tensor> & found_inf, at::TensorList out) {
28049   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
28050   auto maybe_layer = maybeCurrentDynamicLayer();
28051   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
28052   int64_t cur_level = maybe_layer->layerId();
28053   if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(grads, cur_level) && !isBatchedAtLevel(state_sums, cur_level) && !isBatchedAtLevel(state_steps, cur_level) && !isBatchedAtLevel(lr, cur_level) && !isBatchedAtLevel(grad_scale, cur_level) && !isBatchedAtLevel(found_inf, cur_level) && !isBatchedAtLevel(out, cur_level)) {
28054     return at::_ops::_fused_adagrad_tensor_lr_out::call(self, grads, state_sums, state_steps, lr, lr_decay, weight_decay, eps, maximize, grad_scale, found_inf, out);
28055   }
28056   auto [lr_value, lr_bdim] = unwrapTensorAtLevel(lr, cur_level);
28057   std::optional<Tensor> grad_scale_value;
28058   std::optional<int64_t> grad_scale_bdim;
28059   if (grad_scale) {
28060       std::tie(grad_scale_value, grad_scale_bdim) = unwrapTensorAtLevel(grad_scale.value(), cur_level);
28061   }
28062   std::optional<Tensor> found_inf_value;
28063   std::optional<int64_t> found_inf_bdim;
28064   if (found_inf) {
28065       std::tie(found_inf_value, found_inf_bdim) = unwrapTensorAtLevel(found_inf.value(), cur_level);
28066   }
28067   batch_rule(self, grads, state_sums, state_steps, lr_value, lr_bdim, lr_decay, weight_decay, eps, maximize, grad_scale_value, grad_scale_bdim, found_inf_value, found_inf_bdim, out);
28068 }
28069 template <typename batch_rule_t, batch_rule_t batch_rule>
28070 ::std::tuple<::std::vector<at::Tensor>,::std::vector<at::Tensor>,::std::vector<at::Tensor>> _fused_adagrad_tensor_lr_generated_plumbing(at::TensorList self, at::TensorList grads, at::TensorList state_sums, at::TensorList state_steps, const at::Tensor & lr, double lr_decay, double weight_decay, double eps, bool maximize, const ::std::optional<at::Tensor> & grad_scale, const ::std::optional<at::Tensor> & found_inf) {
28071   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
28072   auto maybe_layer = maybeCurrentDynamicLayer();
28073   vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
28074   int64_t cur_level = maybe_layer->layerId();
28075   if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(grads, cur_level) && !isBatchedAtLevel(state_sums, cur_level) && !isBatchedAtLevel(state_steps, cur_level) && !isBatchedAtLevel(lr, cur_level) && !isBatchedAtLevel(grad_scale, cur_level) && !isBatchedAtLevel(found_inf, cur_level)) {
28076     return at::_ops::_fused_adagrad_tensor_lr::call(self, grads, state_sums, state_steps, lr, lr_decay, weight_decay, eps, maximize, grad_scale, found_inf);
28077   }
28078   auto [lr_value, lr_bdim] = unwrapTensorAtLevel(lr, cur_level);
28079   std::optional<Tensor> grad_scale_value;
28080   std::optional<int64_t> grad_scale_bdim;
28081   if (grad_scale) {
28082       std::tie(grad_scale_value, grad_scale_bdim) = unwrapTensorAtLevel(grad_scale.value(), cur_level);
28083   }
28084   std::optional<Tensor> found_inf_value;
28085   std::optional<int64_t> found_inf_bdim;
28086   if (found_inf) {
28087       std::tie(found_inf_value, found_inf_bdim) = unwrapTensorAtLevel(found_inf.value(), cur_level);
28088   }
28089   auto results = batch_rule(self, grads, state_sums, state_steps, lr_value, lr_bdim, lr_decay, weight_decay, eps, maximize, grad_scale_value, grad_scale_bdim, found_inf_value, found_inf_bdim);
28090   return std::make_tuple(makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level), makeBatchedVector(std::get<2>(results), std::get<3>(results), cur_level), makeBatchedVector(std::get<4>(results), std::get<5>(results), cur_level));
28091 }
28092 
28093 }} // namespace at::functorch
28094